diff --git "a/val.json" "b/val.json" new file mode 100644--- /dev/null +++ "b/val.json" @@ -0,0 +1,49322 @@ +[ + { + "library": "matplotlib", + "name": "add_cell", + "source_code": "def add_cell(self, row, col, *args, **kwargs):\n xy = (0, 0)\n cell = Cell(xy, *args, visible_edges=self.edges, **kwargs)\n self[row, col] = cell\n return cell", + "docstring": "Create a cell and add it to the table. Parameters ---------- row : int Row index. col : int Column index. *args, **kwargs All other parameters are passed on to . Returns ------- The created cell.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:add_cell arg:self arg:row arg:col arguments arg arg arg arg arg Assign Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "blend_hsv", + "source_code": "def blend_hsv(self, rgb, intensity, hsv_max_sat=None, hsv_max_val=None, hsv_min_val=None, hsv_min_sat=None):\n if hsv_max_sat is None:\n hsv_max_sat = self.hsv_max_sat\n if hsv_max_val is None:\n hsv_max_val = self.hsv_max_val\n if hsv_min_sat is None:\n hsv_min_sat = self.hsv_min_sat\n if hsv_min_val is None:\n hsv_min_val = self.hsv_min_val\n intensity = intensity[..., 0]\n intensity = 2 * intensity - 1\n hsv = rgb_to_hsv(rgb[:, :, 0:3])\n hue, sat, val = np.moveaxis(hsv, -1, 0)\n np.putmask(sat, (np.abs(sat) > 1e-10) & (intensity > 0), (1 - intensity) * sat + intensity * hsv_max_sat)\n np.putmask(sat, (np.abs(sat) > 1e-10) & (intensity < 0), (1 + intensity) * sat - intensity * hsv_min_sat)\n np.putmask(val, intensity > 0, (1 - intensity) * val + intensity * hsv_max_val)\n np.putmask(val, intensity < 0, (1 + intensity) * val - intensity * hsv_min_val)\n np.clip(hsv[:, :, 1:], 0, 1, out=hsv[:, :, 1:])\n return hsv_to_rgb(hsv)", + "docstring": "Take the input data array, convert to HSV values in the given colormap, then adjust those color values to give the impression of a shaded relief map with a specified light source. RGBA values are returned, which can then be used to plot the shaded image with imshow. The color of the resulting image will be darkened by moving the (s, v) values (in HSV colorspace) toward (hsv_min_sat, hsv_min_val) in the shaded regions, or lightened by sliding (s, v) toward (hsv_max_sat, hsv_max_val) in regions that are illuminated. The default extremes are chose so that completely shaded points are nearly black (s = 1, v = 0) and completely illuminated points are nearly white (s = 0, v = 1). Parameters ---------- rgb : An (M, N, 3) RGB array of floats ranging from 0 to 1 (color image). intensity : An (M, N, 1) array of floats ranging from 0 to 1 (grayscale image). hsv_max_sat : number, optional The maximum saturation value that the *intensity* map can shift the output image to. If not provided, use the value provided upon initialization. hsv_min_sat : number, optional The minimum saturation value that the *intensity* map can shift the output image to. If not provided, use the value provided upon initialization. hsv_max_val : number, optional The maximum value (\"v\" in \"hsv\") that the *intensity* map can shift the output image to. If not provided, use the value provided upon initialization. hsv_min_val : number, optional The minimum value (\"v\" in \"hsv\") that the *intensity* map can shift the output image to. If not provided, use the value provided upon initialization. 
Returns ------- An (M, N, 3) RGB array representing the combined images.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:blend_hsv arg:self arg:rgb arg:intensity arg:hsv_max_sat arg:hsv_max_val arg:hsv_min_val arg:hsv_min_sat arguments arg arg arg arg arg arg arg If Compare Assign If Compare Assign If Compare Assign If Compare Assign Assign Assign Assign Call Assign Call Call Compare Call Compare Call Compare Call Compare Call Compare Call Compare Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "ev", + "source_code": "def ev(self, xi, yi, dx=0, dy=0):\n return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)", + "docstring": "Evaluate the spline at points Returns the interpolated value at `` of meshgrid would result in an unexpected (transposed) result after interpolation. >>> xarr = np.linspace(-3, 3, 21) >>> yarr = np.linspace(-3, 3, 21) >>> xgrid, ygrid = np.meshgrid(xarr, yarr, indexing=\"ij\") >>> zdata = f(xgrid, ygrid) >>> rbs = RectBivariateSpline(xarr, yarr, zdata, kx=1, ky=1) Next we sample the function along a diagonal slice through the coordinate space on a finer grid using interpolation. >>> xinterp = np.linspace(-3, 3, 201) >>> yinterp = np.linspace(3, -3, 201) >>> zinterp = rbs.ev(xinterp, yinterp) And check that the interpolation passes through the function evaluations as a function of the distance from the origin along the slice. >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax1 = fig.add_subplot(1, 1, 1) >>> ax1.plot(np.sqrt(xarr**2 + yarr**2), np.diag(zdata), \"or\") >>> ax1.plot(np.sqrt(xinterp**2 + yinterp**2), zinterp, \"-b\") >>> plt.show()", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py", + "ast_data": "FunctionDef name:ev arg:self arg:xi arg:yi arg:dx arg:dy arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "get_box_kernel1d", + "source_code": "def get_box_kernel1d(kernel_size: int, *, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n scale = tensor(1.0 / kernel_size, device=device, dtype=dtype)\n return scale.expand(1, kernel_size)", + "docstring": "Return a 1-D box filter. Args: kernel_size: the size of the kernel. device: the desired device of returned tensor. dtype: the desired data type of returned tensor. Returns: A tensor with shape :math:, filled with the value :math:.", + "type": "function", + "file_path": "kornia\\kornia\\filters\\kernels.py", + "ast_data": "FunctionDef name:get_box_kernel1d arg:kernel_size arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "OptionError", + "source_code": "class OptionError(AttributeError, KeyError):\n pass", + "docstring": "Exception raised for pandas.options. Backwards compatible with KeyError checks. See Also -------- options : Access and modify global pandas settings. 
Examples -------- >>> pd.options.context Traceback (most recent call last): OptionError: No such option", + "type": "class", + "file_path": "pandas\\pandas\\_config\\config.py", + "ast_data": "ClassDef name:OptionError" + }, + { + "library": "matplotlib", + "name": "register", + "source_code": "def register(self, cmap, *, name=None, force=False):\n _api.check_isinstance(colors.Colormap, cmap=cmap)\n name = name or cmap.name\n if name in self:\n if not force:\n raise ValueError(f'A colormap named \"{name}\" is already registered.')\n elif name in self._builtin_cmaps:\n raise ValueError(f'Re-registering the builtin cmap {name!r} is not allowed.')\n _api.warn_external(f'Overwriting the cmap {name!r} that was already in the registry.')\n self._cmaps[name] = cmap.copy()\n if self._cmaps[name].name != name:\n self._cmaps[name].name = name", + "docstring": "Register a new colormap. The colormap name can then be used as a string argument to any `` is used. force : bool, default: False If False, a ValueError is raised if trying to overwrite an already registered name. True supports overwriting registered colormaps other than the builtin colormaps.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cm.py", + "ast_data": "FunctionDef name:register arg:self arg:cmap arguments arg arg arg arg Call Assign BoolOp If Compare If Raise Call If Compare Raise Call Call Assign Call If Compare Assign" + }, + { + "library": "seaborn", + "name": "build_plot_signature", + "source_code": "def build_plot_signature(cls):\n sig = inspect.signature(cls)\n params = [inspect.Parameter('args', inspect.Parameter.VAR_POSITIONAL), inspect.Parameter('data', inspect.Parameter.KEYWORD_ONLY, default=None)]\n params.extend([inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None) for name in PROPERTIES])\n new_sig = sig.replace(parameters=params)\n cls.__signature__ = new_sig\n known_properties = textwrap.fill(', '.join([f'|{p}|' for p in PROPERTIES]), width=78, subsequent_indent=' ' * 8)\n if cls.__doc__ is not None:\n cls.__doc__ = cls.__doc__.format(known_properties=known_properties)\n return cls", + "docstring": "Decorator function for giving Plot a useful signature. Currently this mostly saves us some duplicated typing, but we would like eventually to have a way of registering new semantic properties, at which point dynamic signature generation would become more important.", + "type": "function", + "file_path": "seaborn\\seaborn\\_core\\plot.py", + "ast_data": "FunctionDef name:build_plot_signature arg:cls arguments arg Assign Call Assign Call Call Call Call Assign Call Assign Assign Call Call If Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "Variadic", + "source_code": "class Variadic(metaclass=VariadicSignatureMeta):\n pass", + "docstring": "A class whose getitem method can be used to generate a new type representing a specific variadic signature. 
Examples -------- >>> # xdoctest: +SKIP >>> Variadic[int] # any number of int arguments >>> Variadic[(int, str)] # any number of one of int or str arguments >>> issubclass(int, Variadic[int]) True >>> issubclass(int, Variadic[(int, str)]) True >>> issubclass(str, Variadic[(int, str)]) True >>> issubclass(float, Variadic[(int, str)]) False", + "type": "class", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\variadic.py", + "ast_data": "ClassDef name:Variadic" + }, + { + "library": "tensorflow", + "name": "_get_sparse_tensors", + "source_code": "@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\ndef _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None):\n del weight_collections\n del trainable\n input_tensor = inputs.get(self)\n return self._get_sparse_tensors_for_input_tensor(input_tensor)", + "docstring": "Converts dense inputs to SparseTensor so downstream code can use it.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:_get_sparse_tensors arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "on_build_finished", + "source_code": "def on_build_finished(app: Sphinx, error: Exception) -> None:\n domain = app.env.domains['duration']\n if not domain.reading_durations:\n return\n durations = sorted(domain.reading_durations.items(), key=itemgetter(1), reverse=True)\n logger.info('')\n logger.info(__('====================== slowest reading durations ======================='))\n for docname, d in islice(durations, 5):\n logger.info(f'{d:.3f} {docname}')", + "docstring": "Display duration ranking on the current build.", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\duration.py", + "ast_data": "FunctionDef name:on_build_finished arg:app arg:error arguments arg arg Assign If Return return:no Assign Call Call Call Call Call Call For Call Call" + }, + { + "library": "pytorch", + "name": "_ITraceObserver", + "source_code": "class _ITraceObserver(ABC):\n\n @abstractmethod\n def start(self):\n pass\n\n @abstractmethod\n def stop(self):\n pass\n\n @abstractmethod\n def cleanup(self):\n pass", + "docstring": "Abstract interface for a Trace observer. This satisfies 3 methods: start, stop and cleanup", + "type": "class", + "file_path": "pytorch\\torch\\profiler\\profiler.py", + "ast_data": "ClassDef name:_ITraceObserver FunctionDef name:start arg:self arguments arg FunctionDef name:stop arg:self arguments arg FunctionDef name:cleanup arg:self arguments arg" + }, + { + "library": "numpy", + "name": "from_data", + "source_code": "@classmethod\ndef from_data(cls, data, **options):\n format_functions = []\n for field_name in data.dtype.names:\n format_function = _get_format_function(data[field_name], **options)\n if data.dtype[field_name].shape != ():\n format_function = SubArrayFormat(format_function, **options)\n format_functions.append(format_function)\n return cls(format_functions)", + "docstring": "This is a second way to initialize StructuredVoidFormat, using the raw data as input. 
Added to avoid changing the signature of __init__.", + "type": "method", + "file_path": "numpy\\numpy\\_core\\arrayprint.py", + "ast_data": "FunctionDef name:from_data arg:cls arg:data arguments arg arg arg Assign For Assign Call If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "multiply", + "source_code": "def multiply(self, y: Array | complex, /, copy: bool | None=None, xp: ModuleType | None=None) -> Array:\n return self._op(_AtOp.MULTIPLY, operator.imul, operator.mul, y, copy=copy, xp=xp)", + "docstring": "Apply `` and return the updated array.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py", + "ast_data": "FunctionDef name:multiply arg:copy arg:xp arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "reset", + "source_code": "def reset(self, name=None):\n if self._reader_ref.dtype == dtypes.resource:\n return gen_io_ops.reader_reset_v2(self._reader_ref, name=name)\n else:\n return gen_io_ops.reader_reset(self._reader_ref, name=name)", + "docstring": "Restore a reader to its initial clean state. Args: name: A name for the operation (optional). Returns: The created Operation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py", + "ast_data": "FunctionDef name:reset arg:self arg:name arguments arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_minimum_control_deps", + "source_code": "def _minimum_control_deps(outputs):\n if context.executing_eagerly():\n return []\n outputs = nest.flatten(outputs, expand_composites=True)\n for out in outputs:\n if not isinstance(out, variables.Variable):\n return [out]\n return []", + "docstring": "Returns the minimum control dependencies to ensure step succeeded.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:_minimum_control_deps arg:outputs arguments arg If Call Return return:no Assign Call For If Call Return return:yes Return return:no" + }, + { + "library": "pandas", + "name": "_maybe_convert_i8", + "source_code": "def _maybe_convert_i8(self, key):\n if is_list_like(key):\n key = ensure_index(key)\n key = maybe_upcast_numeric_to_64bit(key)\n if not self._needs_i8_conversion(key):\n return key\n scalar = is_scalar(key)\n key_dtype = getattr(key, 'dtype', None)\n if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):\n left = self._maybe_convert_i8(key.left)\n right = self._maybe_convert_i8(key.right)\n constructor = Interval if scalar else IntervalIndex.from_arrays\n return constructor(left, right, closed=self.closed)\n if scalar:\n key_dtype, key_i8 = infer_dtype_from_scalar(key)\n if isinstance(key, Period):\n key_i8 = key.ordinal\n elif isinstance(key_i8, Timestamp):\n key_i8 = key_i8._value\n elif isinstance(key_i8, (np.datetime64, np.timedelta64)):\n key_i8 = key_i8.view('i8')\n else:\n key_dtype, key_i8 = (key.dtype, Index(key.asi8))\n if key.hasnans:\n key_i8 = key_i8.where(~key._isnan)\n subtype = self.dtype.subtype\n if subtype != key_dtype:\n raise ValueError(f'Cannot index an IntervalIndex of subtype {subtype} with values of dtype {key_dtype}')\n return key_i8", + "docstring": "Maybe convert a given key to its equivalent i8 value(s). Used as a preprocessing step prior to IntervalTree queries (self._engine), which expects numeric data. 
Parameters ---------- key : scalar or list-like The key that should maybe be converted to i8. Returns ------- scalar or list-like The original key if no conversion occurred, int if converted scalar, Index with an int64 dtype if converted list-like.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\interval.py", + "ast_data": "FunctionDef name:_maybe_convert_i8 arg:self arg:key arguments arg arg If Call Assign Call Assign Call If Call Return return:yes Assign Call Assign Call If BoolOp Call Call Assign Call Assign Call Assign Return return:yes Call If Assign Call If Call Assign If Call Assign If Call Assign Call Assign Call If Assign Call Assign If Compare Raise Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_check_warnings", + "source_code": "def _check_warnings(self, *, params):\n params = {} if params is None else params\n warn_params = {prop for prop, alias in self._requests.items() if alias == WARN and prop in params}\n for param in warn_params:\n warn(f'Support for {param} has recently been added to this class. To maintain backward compatibility, it is ignored now. Using `set_{self.method}_request({param}={{True, False}})` on this method of the class, you can set the request value to False to silence this warning, or to True to consume and use the metadata.')", + "docstring": "Check whether metadata is passed which is marked as WARN. If any metadata is passed which is marked as WARN, a warning is raised. Parameters ---------- params : dict The metadata passed to a method.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", + "ast_data": "FunctionDef name:_check_warnings arg:self arguments arg arg Assign Compare Assign Call BoolOp Compare Compare For Call" + }, + { + "library": "pandas", + "name": "is_any_real_numeric_dtype", + "source_code": "def is_any_real_numeric_dtype(arr_or_dtype) -> bool:\n return is_numeric_dtype(arr_or_dtype) and (not is_complex_dtype(arr_or_dtype)) and (not is_bool_dtype(arr_or_dtype))", + "docstring": "Check whether the provided array or dtype is of a real number dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a real number dtype. See Also -------- is_numeric_dtype : Check if a dtype is numeric. is_complex_dtype : Check if a dtype is complex. is_bool_dtype : Check if a dtype is boolean. Examples -------- >>> from pandas.api.types import is_any_real_numeric_dtype >>> is_any_real_numeric_dtype(int) True >>> is_any_real_numeric_dtype(float) True >>> is_any_real_numeric_dtype(object) False >>> is_any_real_numeric_dtype(str) False >>> is_any_real_numeric_dtype(complex(1, 2)) False >>> is_any_real_numeric_dtype(bool) False", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\common.py", + "ast_data": "FunctionDef name:is_any_real_numeric_dtype arg:arr_or_dtype arguments arg Return return:yes BoolOp Call Call Call" + }, + { + "library": "scipy", + "name": "to_discrete", + "source_code": "def to_discrete(self, dt, method='zoh', alpha=None):\n return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D), dt, method=method, alpha=alpha)[:-1], dt=dt)", + "docstring": "Returns the discretized system. Parameters: See for details. 
Returns ------- sys: instance of and", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:to_discrete arg:self arg:dt arg:method arg:alpha arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_maybe_rotate_dims", + "source_code": "def _maybe_rotate_dims(self, x, rotate_right=False):\n needs_rotation_const = tensor_util.constant_value(self._needs_rotation)\n if needs_rotation_const is not None and (not needs_rotation_const):\n return x\n ndims = array_ops.rank(x)\n n = ndims - self._rotate_ndims if rotate_right else self._rotate_ndims\n return array_ops.transpose(x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))", + "docstring": "Helper which rolls left event_dims left or right event_dims right.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:_maybe_rotate_dims arg:self arg:x arg:rotate_right arguments arg arg arg Assign Call If BoolOp Compare Return return:yes Assign Call Assign Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "retrieve_from_web", + "source_code": "def retrieve_from_web(generate_csv=False):\n url = 'https://developer.nvidia.com/cuda-gpus'\n source = urllib.request.urlopen(url)\n matches = []\n while True:\n line = source.readline()\n if '' in line:\n break\n else:\n gpu = re.search('([\\\\w\\\\S\\\\s\\\\d\\\\[\\\\]\\\\,]+[^*])(.*', line)\n if gpu:\n matches.append(gpu.group(1))\n elif capability:\n if capability.group(3):\n capability_str = capability.group(4) + '.' + capability.group(6)\n else:\n capability_str = capability.group(1) + '.' + capability.group(2)\n matches.append(capability_str)\n return create_gpu_capa_map(matches, generate_csv)", + "docstring": "Retrieves list of all CUDA compute capability from NVIDIA webpage. Args: generate_csv: Boolean for generating an output file containing the results. Returns: OrderedDict that is a list of all CUDA compute capability listed on the NVIDIA page. Order goes from top to bottom of the webpage content (.html).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py", + "ast_data": "FunctionDef name:retrieve_from_web arg:generate_csv arguments arg Assign Assign Call Assign While Assign Call If Compare Assign Call Assign Call If Call Call If If Call Assign Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "op", + "source_code": "@property\ndef op(self) -> ops.Operation:\n return self._variable.op", + "docstring": "The of this variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_process_parameters", + "source_code": "def _process_parameters(self, n, p):\n eps = np.finfo(np.result_type(np.asarray(p), np.float32)).eps * 10\n p = np.array(p, dtype=np.float64, copy=True)\n p_adjusted = 1.0 - p[..., :-1].sum(axis=-1)\n i_adjusted = np.abs(1 - p.sum(axis=-1)) > eps\n p[i_adjusted, -1] = p_adjusted[i_adjusted]\n if np.any(i_adjusted):\n message = f'Some rows of `p` do not sum to 1.0 within tolerance of eps={eps!r}. Currently, the last element of these rows is adjusted to compensate, but this condition will produce NaNs beginning in SciPy 1.18.0. 
Please ensure that rows of `p` sum to 1.0 to avoid futher disruption.'\n warnings.warn(message, FutureWarning, stacklevel=3)\n pcond = np.any(p < 0, axis=-1)\n pcond |= np.any(p > 1, axis=-1)\n n = np.array(n, dtype=int, copy=True)\n ncond = n < 0\n return (n, p, ncond | pcond)", + "docstring": "Returns: n_, p_, npcond. n_ and p_ are arrays of the correct shape; npcond is a boolean array flagging values out of the domain.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_process_parameters arg:self arg:n arg:p arguments arg arg arg Assign Call Call Call Assign Call Assign Call Assign Compare Call Call Assign If Call Assign Call Assign Call Compare Call Compare Assign Call Assign Compare Return return:yes" + }, + { + "library": "pandas", + "name": "metadata", + "source_code": "@property\n@abstractmethod\ndef metadata(self) -> dict[str, Any]:\n pass", + "docstring": "The metadata for the data frame, as a dictionary with string keys. The contents of may be anything, they are meant for a library to store information that it needs to, e.g., roundtrip losslessly or for two implementations to share data that is not (yet) part of the interchange protocol specification. For avoiding collisions with other entries, please add name the keys with the name of the library followed by a period and the desired name, e.g, ``.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py", + "ast_data": "FunctionDef name:metadata arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "outside_or_skip_tpu_context", + "source_code": "@contextlib.contextmanager\ndef outside_or_skip_tpu_context():\n ctx, graph = enclosing_tpu_context_and_graph()\n if ctx is None:\n yield\n else:\n saved_context = graph._get_control_flow_context()\n graph._set_control_flow_context(ctx.outer_context)\n yield\n graph._set_control_flow_context(saved_context)", + "docstring": "Returns a context manager that skips current enclosing context if there is any.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_util.py", + "ast_data": "FunctionDef name:outside_or_skip_tpu_context arguments Assign Call If Compare Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "can_reorder_logs", + "source_code": "@staticmethod\ndef can_reorder_logs(fn, args, kwargs) -> True:\n allowed_input_types = (variables.TensorVariable, variables.ConstantVariable, StringFormatVariable)\n flat_args = pytree.tree_leaves([args, kwargs])\n for arg in flat_args:\n if not isinstance(arg, allowed_input_types):\n return False\n return True", + "docstring": "Run some additional checks for what sort of function calls can we actually reorder.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py", + "ast_data": "FunctionDef name:can_reorder_logs arg:fn arg:args arg:kwargs arguments arg arg arg Assign Assign Call For If Call Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "InvalidBasesError", + "source_code": "class InvalidBasesError(ValueError):\n pass", + "docstring": "A model's base classes can't be resolved.", + "type": "class", + "file_path": "django\\django\\db\\migrations\\exceptions.py", + "ast_data": "ClassDef name:InvalidBasesError" + }, + { + "library": "tensorflow", + "name": "batch_norm_op", + "source_code": "def batch_norm_op(tensor, mean, variance, beta, gamma, scale):\n test_util.set_producer_version(ops.get_default_graph(), 8)\n return 
gen_nn_ops._batch_norm_with_global_normalization(tensor, mean, variance, beta, gamma, 0.001, scale)", + "docstring": "Fused kernel for batch normalization.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\batch_norm_benchmark.py", + "ast_data": "FunctionDef name:batch_norm_op arg:tensor arg:mean arg:variance arg:beta arg:gamma arg:scale arguments arg arg arg arg arg arg Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "convert", + "source_code": "def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str) -> tuple[Index, Index]:\n assert isinstance(values, np.ndarray), type(values)\n index = RangeIndex(len(values))\n return (index, index)", + "docstring": "Convert the data from this selection to the appropriate pandas type. Parameters ---------- values : np.ndarray nan_rep : str encoding : str errors : str", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:convert arg:self arg:values arg:nan_rep arg:encoding arg:errors arguments arg arg arg arg arg Call Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "_populate_directed_relation_graph", + "source_code": "def _populate_directed_relation_graph(self):\n related_objects_graph = defaultdict(list)\n all_models = self.apps.get_models(include_auto_created=True)\n for model in all_models:\n opts = model._meta\n if opts.abstract:\n continue\n fields_with_relations = (f for f in opts._get_fields(reverse=False, include_parents=False) if f.is_relation and f.related_model is not None)\n for f in fields_with_relations:\n if not isinstance(f.remote_field.model, str):\n remote_label = f.remote_field.model._meta.concrete_model._meta.label\n related_objects_graph[remote_label].append(f)\n for model in all_models:\n related_objects = related_objects_graph[model._meta.concrete_model._meta.label]\n model._meta.__dict__['_relation_tree'] = related_objects\n return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)", + "docstring": "This method is used by each model to find its reverse objects. As this method is very expensive and is accessed frequently (it looks up every field in a model, in every app), it is computed on first access and then is set as a property on every model.", + "type": "method", + "file_path": "django\\django\\db\\models\\options.py", + "ast_data": "FunctionDef name:_populate_directed_relation_graph arg:self arguments arg Assign Call Assign Call For Assign If Assign Call BoolOp Compare For If Call Assign Call For Assign Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "with_alpha", + "source_code": "def with_alpha(self, alpha):\n if not isinstance(alpha, Real):\n raise TypeError(f\"'alpha' must be numeric or None, not {type(alpha)}\")\n if not 0 <= alpha <= 1:\n ValueError(\"'alpha' must be between 0 and 1, inclusive\")\n new_cm = self.copy()\n if not new_cm._isinit:\n new_cm._init()\n new_cm._lut[:, 3] = alpha\n return new_cm", + "docstring": "Return a copy of the colormap with a new uniform transparency. 
Parameters ---------- alpha : float The alpha blending value, between 0 (transparent) and 1 (opaque).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:with_alpha arg:self arg:alpha arguments arg arg If Call Raise Call Call If Compare Call Assign Call If Call Assign Return return:yes" + }, + { + "library": "django", + "name": "get_redirect_url", + "source_code": "def get_redirect_url(self, *args, **kwargs):\n if self.url:\n url = self.url % kwargs\n elif self.pattern_name:\n url = reverse(self.pattern_name, args=args, kwargs=kwargs)\n else:\n return None\n args = self.request.META.get('QUERY_STRING', '')\n if args and self.query_string:\n url = '%s?%s' % (url, args)\n return url", + "docstring": "Return the URL redirect to. Keyword arguments from the URL pattern match generating the redirect request are provided as kwargs to this method.", + "type": "method", + "file_path": "django\\django\\views\\generic\\base.py", + "ast_data": "FunctionDef name:get_redirect_url arg:self arguments arg arg arg If Assign If Assign Call Return return:no Assign Call If BoolOp Assign Return return:yes" + }, + { + "library": "django", + "name": "ValuesIterable", + "source_code": "class ValuesIterable(BaseIterable):\n\n def __iter__(self):\n queryset = self.queryset\n query = queryset.query\n compiler = query.get_compiler(queryset.db)\n if query.selected:\n names = list(query.selected)\n else:\n names = [*query.extra_select, *query.values_select, *query.annotation_select]\n indexes = range(len(names))\n for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):\n yield {names[i]: row[i] for i in indexes}", + "docstring": "Iterable returned by QuerySet.values() that yields a dict for each row.", + "type": "class", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "ClassDef name:ValuesIterable FunctionDef name:__iter__ arg:self arguments arg Assign Assign Assign Call If Assign Call Assign Assign Call Call For Call" + }, + { + "library": "tensorflow", + "name": "_convert_to_sparse_tensors", + "source_code": "def _convert_to_sparse_tensors(sp_inputs):\n if isinstance(sp_inputs, list):\n return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]\n if isinstance(sp_inputs, tuple):\n return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)\n raise TypeError('Inputs must be a list or tuple.')", + "docstring": "Convert to objects and return them. Args: sp_inputs: or of or objects. Returns: converted to objects. Raises: ValueError: if any item in is neither nor .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:_convert_to_sparse_tensors arg:sp_inputs arguments arg If Call Return return:yes Call If Call Return return:yes Call Raise Call" + }, + { + "library": "seaborn", + "name": "map_offdiag", + "source_code": "def map_offdiag(self, func, **kwargs):\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self", + "docstring": "Plot with a bivariate function on the off-diagonal subplots. Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the \"currently active\" matplotlib Axes. 
Also needs to accept kwargs called ``.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:map_offdiag arg:self arg:func arguments arg arg arg If Call If Call Assign For Call For Call If Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "extract_variable_info", + "source_code": "def extract_variable_info(kwargs) -> Tuple[Text, Tuple[int, ...], dtypes.DType, Callable[[], Any]]:\n if isinstance(kwargs['initial_value'], functools.partial) and ('shape' in kwargs['initial_value'].keywords or kwargs['initial_value'].args):\n if 'shape' in kwargs['initial_value'].keywords:\n shape = kwargs['initial_value'].keywords['shape']\n else:\n shape = kwargs['initial_value'].args[0]\n return (kwargs['name'], shape, kwargs['initial_value'].keywords.get('dtype', kwargs['dtype']), kwargs['initial_value'].func)\n elif 'shape' not in kwargs or kwargs['shape'] is None or (not callable(kwargs['initial_value'])):\n raise ValueError('Unable to extract initializer function and shape from {}. Please either pass a function that expects a shape and dtype as the initial value for your variable or functools.partial object with the shape and dtype kwargs set. This is needed so that we can initialize the shards of the ShardedVariable locally.'.format(kwargs['initial_value']))\n else:\n return (kwargs['name'], kwargs['shape'], kwargs['dtype'], kwargs['initial_value'])", + "docstring": "Extracts the variable creation attributes from the kwargs. Args: kwargs: a dict of keyword arguments that were passed to a variable creator scope. Returns: A tuple of variable name, shape, dtype, initialization function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py", + "ast_data": "FunctionDef name:extract_variable_info arg:kwargs arguments arg If BoolOp Call BoolOp Compare If Compare Assign Assign Return return:yes Call If BoolOp Compare Compare Call Raise Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_import_submodules", + "source_code": "def _import_submodules(self):\n imported_modules = set(self._module_imports.keys())\n for module in imported_modules:\n if not module:\n continue\n module_split = module.split('.')\n parent_module = ''\n for submodule_index in range(len(module_split)):\n if submodule_index > 0:\n submodule = module_split[submodule_index - 1]\n parent_module += '.' + submodule if parent_module else submodule\n import_from = self._output_package\n if self._lazy_loading:\n import_from += '.' + '.'.join(module_split[:submodule_index + 1])\n self.add_import(symbol=None, source_module_name='', source_name=import_from, dest_module_name=parent_module, dest_name=module_split[submodule_index])\n else:\n if self._use_relative_imports:\n import_from = '.'\n elif submodule_index > 0:\n import_from += '.' 
+ '.'.join(module_split[:submodule_index])\n self.add_import(symbol=None, source_module_name=import_from, source_name=module_split[submodule_index], dest_module_name=parent_module, dest_name=module_split[submodule_index])", + "docstring": "Add imports for all destination modules in self._module_imports.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py", + "ast_data": "FunctionDef name:_import_submodules arg:self arguments arg Assign Call Call For If Assign Call Assign For Call Call If Compare Assign Assign If Call Call If Assign If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "multinomial_categorical_impl", + "source_code": "def multinomial_categorical_impl(logits, num_samples, dtype, seed):\n logits = ops.convert_to_tensor(logits, name='logits')\n dtype = dtypes.as_dtype(dtype) if dtype else dtypes.int64\n accepted_dtypes = (dtypes.int32, dtypes.int64)\n if dtype not in accepted_dtypes:\n raise ValueError(f'Argument `dtype` got invalid value {dtype}. Accepted dtypes are {accepted_dtypes}.')\n seed1, seed2 = random_seed.get_seed(seed)\n return gen_random_ops.multinomial(logits, num_samples, seed=seed1, seed2=seed2, output_dtype=dtype)", + "docstring": "Implementation for random.categorical (v1) and random.categorical (v2).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py", + "ast_data": "FunctionDef name:multinomial_categorical_impl arg:logits arg:num_samples arg:dtype arg:seed arguments arg arg arg arg Assign Call Assign Call Assign If Compare Raise Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_prepare_n_shadows_add_loggers_model", + "source_code": "def _prepare_n_shadows_add_loggers_model(model: torch.nn.Module, example_inputs: Any, qconfig_mapping: QConfigMapping, backend_config: BackendConfig) -> torch.nn.Module:\n tracer = quantize_fx.QuantizationTracer([], [])\n mt = torch.fx.GraphModule(model, tracer.trace(model))\n mt._node_name_to_scope = tracer.node_name_to_scope\n output_prop = OutputProp(mt)\n output_prop.propagate(*example_inputs)\n modules = dict(mt.named_modules(remove_duplicate=False))\n patterns = _get_pattern_to_quantize_handlers(backend_config)\n root_node_getter_mapping = get_fusion_pattern_to_root_node_getter(backend_config)\n standalone_module_names: list[str] = []\n standalone_module_classes: list[type] = []\n custom_module_classes: list[type] = []\n matches = _find_matches(mt.graph, modules, patterns, root_node_getter_mapping, standalone_module_names, standalone_module_classes, custom_module_classes)\n subgraphs_dedup: dict[str, list[Node]] = _get_dedup_subgraphs(matches)\n node_name_to_qconfig = _generate_node_name_to_qconfig(mt, modules, mt.graph, qconfig_mapping, tracer.node_name_to_scope)\n create_add_loggers_graph(mt, subgraphs_dedup, qconfig_mapping, node_name_to_qconfig)\n return mt", + "docstring": "Note: this API is not recommended for wide usage, it is only provided for customers who need to migrate from the API. This creates a model which provides logging for the following problem: if we quantize with and feed the same input through both models, log the comparisons of corresponding intermediate layers. The problem is solved with a single model. Specifically, we partition into N subgraphs, create a copy of each relevant subgraph, wrap it in a module, apply the quantization API to that module, and hook up loggers to measure the comparisons. 
Example starting graph: x0 -> op0 -> x1 -> op1 -> x2 Example config: quantize op0 to int8, do nothing to op1. The following graph will be created: .. code:: x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log \\ \\ \\ # noqa: W605 ---> op0_1 -> x1_1 ----> clog -> op1_0 -> x2_1 ----> clog Where op0_0 is op0, op0_1 is op0 wrapped in a submodule and quantized to int8, op1_0 is op1 (appearing in the graph twice), log is a logger, and clog is a comparison logger.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py", + "ast_data": "FunctionDef name:_prepare_n_shadows_add_loggers_model arg:model arg:example_inputs arg:qconfig_mapping arg:backend_config arguments arg arg arg arg Assign Call Assign Call Call Assign Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "normal_transform_pixel", + "source_code": "def normal_transform_pixel(height: int, width: int, eps: float=1e-14, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n tr_mat = tensor([[1.0, 0.0, -1.0], [0.0, 1.0, -1.0], [0.0, 0.0, 1.0]], device=device, dtype=dtype)\n width_denom: float = eps if width == 1 else width - 1.0\n height_denom: float = eps if height == 1 else height - 1.0\n tr_mat[0, 0] = tr_mat[0, 0] * 2.0 / width_denom\n tr_mat[1, 1] = tr_mat[1, 1] * 2.0 / height_denom\n return tr_mat.unsqueeze(0)", + "docstring": "Compute the normalization matrix from image size in pixels to [-1, 1]. Args: height: image height. width: image width. eps: epsilon to prevent divide-by-zero errors device: device to place the result on. dtype: dtype of the result. Returns: normalized transform with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:normal_transform_pixel arg:height arg:width arg:eps arg:device arg:dtype arguments arg arg arg arg arg Assign Call Compare Compare Assign Assign Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n X = validate_data(self, X, dtype=DTYPE, order='C', accept_sparse='csr', reset=False)\n return self._raw_predict(X).ravel()", + "docstring": "Predict regression target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. Returns ------- y : ndarray of shape (n_samples,) The predicted values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "greater", + "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef greater(x1, x2):\n return compare_chararrays(x1, x2, '>', True)", + "docstring": "Return (x1 > x2) element-wise. Unlike , this comparison is performed by first stripping whitespace characters from the end of the string. This behavior is provided for backward-compatibility with numarray. Parameters ---------- x1, x2 : array_like of str or unicode Input arrays of the same shape. Returns ------- out : ndarray Output array of bools. 
See Also -------- equal, not_equal, greater_equal, less_equal, less Examples -------- >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.greater(x1, 'b') array([False, False, True])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:greater arg:x1 arg:x2 arguments arg arg Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "validate_c_hash", + "source_code": "def validate_c_hash(self):\n code = self.params.get('code')\n c_hash = self.get('c_hash')\n if code:\n if not c_hash:\n raise MissingClaimError('c_hash')\n if not _verify_hash(c_hash, code, self.header['alg']):\n raise InvalidClaimError('c_hash')", + "docstring": "Code hash value. Its value is the base64url encoding of the left-most half of the hash of the octets of the ASCII representation of the code value, where the hash algorithm used is the hash algorithm used in the alg Header Parameter of the ID Token's JOSE Header. For instance, if the alg is HS512, hash the code value with SHA-512, then take the left-most 256 bits and base64url encode them. The c_hash value is a case sensitive string. If the ID Token is issued from the Authorization Endpoint with a code, which is the case for the response_type values code id_token and code id_token token, this is REQUIRED; otherwise, its inclusion is OPTIONAL.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\claims.py", + "ast_data": "FunctionDef name:validate_c_hash arg:self arguments arg Assign Call Assign Call If If Raise Call If Call Raise Call" + }, + { + "library": "numpy", + "name": "median", + "source_code": "def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):\n if not hasattr(a, 'mask'):\n m = np.median(getdata(a, subok=True), axis=axis, out=out, overwrite_input=overwrite_input, keepdims=keepdims)\n if isinstance(m, np.ndarray) and 1 <= m.ndim:\n return masked_array(m, copy=False)\n else:\n return m\n return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out, overwrite_input=overwrite_input)", + "docstring": "Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : int, optional Axis along which the medians are computed. The default (None) is to compute the median along a flattened version of the array. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array (a) for calculations. The input array will be modified by the call to median. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. Note that, if is True, and the input is not already an , an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. Returns ------- median : ndarray A new array holding the result is returned unless out is specified, in which case a reference to out is returned. Return data-type is for integers and floats smaller than , or the input data-type, otherwise. 
See Also -------- mean Notes ----- Given a vector `` is even. Examples -------- >>> import numpy as np >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) >>> np.ma.median(x) 1.5 >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) >>> np.ma.median(x) 2.5 >>> np.ma.median(x, axis=-1, overwrite_input=True) masked_array(data=[2.0, 5.0], mask=[False, False], fill_value=1e+20)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:median arg:a arg:axis arg:out arg:overwrite_input arg:keepdims arguments arg arg arg arg arg If Call Assign Call Call If BoolOp Call Compare Return return:yes Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_parse_flags_tolerate_undef", + "source_code": "def _parse_flags_tolerate_undef(argv):\n return flags.FLAGS(_sys.argv if argv is None else argv, known_only=True)", + "docstring": "Parse args, returning any unknown flags (ABSL defaults to crashing).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\app.py", + "ast_data": "FunctionDef name:_parse_flags_tolerate_undef arg:argv arguments arg Return return:yes Call Compare" + }, + { + "library": "scipy", + "name": "trimmed_mean_ci", + "source_code": "def trimmed_mean_ci(data, limits=(0.2, 0.2), inclusive=(True, True), alpha=0.05, axis=None):\n data = ma.array(data, copy=False)\n trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis)\n tmean = trimmed.mean(axis)\n tstde = mstats.trimmed_stde(data, limits=limits, inclusive=inclusive, axis=axis)\n df = trimmed.count(axis) - 1\n tppf = t.ppf(1 - alpha / 2.0, df)\n return np.array((tmean - tppf * tstde, tmean + tppf * tstde))", + "docstring": "Selected confidence interval of the trimmed mean along the given axis. Parameters ---------- data : array_like Input data. limits : {None, tuple}, optional None or a two item tuple. Tuple of the percentages to cut on each side of the array, with respect to the number of unmasked data, as floats between 0. and 1. If `data`. Defaults to None. 
Returns ------- trimmed_mean_ci : (2,) ndarray The lower and upper confidence intervals of the trimmed data.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_extras.py", + "ast_data": "FunctionDef name:trimmed_mean_ci arg:data arg:limits arg:inclusive arg:alpha arg:axis arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "desc_returns", + "source_code": "class desc_returns(desc_type):\n\n def astext(self) -> str:\n return ' -> ' + super().astext()", + "docstring": "Node for a \"returns\" annotation (a la -> in Python).", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:desc_returns FunctionDef name:astext arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_make_output_composite_tensors_match", + "source_code": "def _make_output_composite_tensors_match(op_type, branch_graphs):\n assert branch_graphs\n branch_outputs = [g.structured_outputs for g in branch_graphs]\n outputs_per_branch = list((len(outs) for outs in branch_outputs))\n assert len(set(outputs_per_branch)) == 1, outputs_per_branch\n for output_idx, branch_outs in enumerate(zip(*branch_outputs)):\n if len(set((type(out) for out in branch_outs))) == 1:\n continue\n if not any((isinstance(out, indexed_slices.IndexedSlices) for out in branch_outs)):\n continue\n for branch_idx, branch_out in enumerate(branch_outs):\n if isinstance(branch_out, indexed_slices.IndexedSlices):\n continue\n elif isinstance(branch_out, tensor_lib.Tensor):\n with branch_graphs[branch_idx].as_default():\n branch_outputs[branch_idx][output_idx] = math_ops._as_indexed_slices(branch_out)\n else:\n raise TypeError('Cannot reconcile {op_name} {output_idx}-th outputs:\\n outputs from all branches: {outputs}'.format(op_name='tf.cond' if op_type == _COND else 'tf.switch_case', output_idx=output_idx, outputs=branch_outs))\n for branch_graph, branch_outs in zip(branch_graphs, branch_outputs):\n branch_graph.structured_outputs = branch_outs\n branch_graph.outputs = [t for t in func_graph_module.flatten(branch_outs) if t is not None]", + "docstring": "Modifies each branch_graph's outputs to have the same output signature. Currently the only transformation implemented is turning a Tensor into an equivalent IndexedSlices if the other branch returns an IndexedSlices. Updates branch_graph.{outputs,structured_outputs} for each branch_graph in branch_graphs. 
Args: op_type: _COND or _CASE branch_graphs: of Raises: TypeError: if a set of outputs cannot be rewritten.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", + "ast_data": "FunctionDef name:_make_output_composite_tensors_match arg:op_type arg:branch_graphs arguments arg arg Assign Assign Call Call Compare Call Call For Call Call If Compare Call Call Call If Call Call For Call If Call If Call With Call Assign Call Raise Call Call Compare For Call Assign Assign Call Compare" + }, + { + "library": "pytorch", + "name": "check_keys", + "source_code": "def check_keys(self, keys: Iterable[str]) -> tuple[list[str], list[str]]:\n keys = set(keys)\n valid_keys = {name for name, _ in self.named_tensors(remove_duplicate=False)}\n missing_keys = valid_keys - keys\n unexpected_keys = keys - valid_keys\n return (sorted(missing_keys), sorted(unexpected_keys))", + "docstring": "Check that the given keys are valid.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py", + "ast_data": "FunctionDef name:check_keys arg:self arg:keys arguments arg arg Assign Call Assign Call Assign Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "enable_cuda_sanitizer", + "source_code": "def enable_cuda_sanitizer():\n cuda_sanitizer.enable()", + "docstring": "Enable CUDA Sanitizer. The sanitizer will begin to analyze low-level CUDA calls invoked by torch functions for synchronization errors. All data races found will be printed to the standard error output along with stack traces of suspected causes. For best results, the sanitizer should be enabled at the very beginning of the program.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\_sanitizer.py", + "ast_data": "FunctionDef name:enable_cuda_sanitizer arguments Call" + }, + { + "library": "pytorch", + "name": "byteswap", + "source_code": "def byteswap(self, dtype):\n elem_size = torch._utils._element_size(dtype)\n if dtype.is_complex:\n elem_size = max(int(elem_size / 2), 1)\n self._byteswap(elem_size)", + "docstring": "Swap bytes in underlying data.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:byteswap arg:self arg:dtype arguments arg arg Assign Call If Assign Call Call Call" + }, + { + "library": "django", + "name": "column_sql", + "source_code": "def column_sql(self, model, field, include_default=False):\n field_db_params = field.db_parameters(connection=self.connection)\n column_db_type = field_db_params['type']\n if column_db_type is None:\n return (None, None)\n params = []\n return (' '.join(self._iter_column_sql(column_db_type, params, model, field, field_db_params, include_default)), params)", + "docstring": "Return the column definition for a field. 
The field must already have had set_attributes_from_name() called.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\schema.py", + "ast_data": "FunctionDef name:column_sql arg:self arg:model arg:field arg:include_default arguments arg arg arg arg Assign Call Assign If Compare Return return:no Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "get_fusion_candidates", + "source_code": "def get_fusion_candidates(rule: GroupBatchFusionBase, root_node: torch.fx.Node, fused_set: OrderedSet[torch.fx.Node]) -> collections.defaultdict[Any, list[torch.fx.Node]]:\n q: collections.deque[tuple[int, torch.fx.Node]] = collections.deque()\n candidate_dict: collections.defaultdict[Any, list[torch.fx.Node]] = collections.defaultdict(list)\n if root_node.target in SEARCH_EXCLUSIONS:\n return candidate_dict\n visited_set = OrderedSet[torch.fx.Node]()\n for next_node in root_node.all_input_nodes:\n q.append((1, next_node))\n visited_set.add(next_node)\n while len(q) > 0:\n depth, node = q.popleft()\n if node in fused_set:\n continue\n key = rule.match(node)\n if key is not None:\n candidate_nodes = candidate_dict[key]\n if node not in candidate_nodes:\n candidate_nodes.append(node)\n elif depth < rule.graph_search_options['max_fuse_search_depth']:\n for next_node in node.all_input_nodes:\n if next_node not in visited_set:\n visited_set.add(next_node)\n q.append((depth + 1, next_node))\n return candidate_dict", + "docstring": "Search fusion candidates for a specific rule using BFS starting from the root node. We only search the subgraph within graph_search_options[\"max_fuse_search_depth\"].", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\group_batch_fusion.py", + "ast_data": "FunctionDef name:get_fusion_candidates arg:rule arg:root_node arg:fused_set arguments arg arg arg Call Call If Compare Return return:yes Assign Call For Call Call While Compare Call Assign Call If Compare Assign Call If Compare Assign If Compare Call If Compare For If Compare Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "add_to_uri", + "source_code": "def add_to_uri(token, uri):\n return add_params_to_uri(uri, [('access_token', token)])", + "docstring": "Add a Bearer Token to the request URI. Not recommended, use only if client can't use authorization header or body.", + "type": "function", + "file_path": "authlib\\authlib\\oauth2\\rfc6750\\parameters.py", + "ast_data": "FunctionDef name:add_to_uri arg:token arg:uri arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_output_file_path", + "source_code": "def get_output_file_path(self) -> Optional[str]:\n if self.output_file_path:\n return self.output_file_path\n else:\n return None", + "docstring": "Returns the output file name or None.", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\profiler.py", + "ast_data": "FunctionDef name:get_output_file_path arg:self arguments arg If Return return:yes Return return:no" + }, + { + "library": "django", + "name": "PersistentRemoteUserMiddleware", + "source_code": "class PersistentRemoteUserMiddleware(RemoteUserMiddleware):\n force_logout_if_no_header = False", + "docstring": "Middleware for web-server provided authentication on logon pages. Like RemoteUserMiddleware but keeps the user authenticated even if the `` key is not found in the request. 
Useful for setups when the external authentication is only expected to happen on some \"logon\" URL and the rest of the application wants to use Django's authentication mechanism.", + "type": "class", + "file_path": "django\\django\\contrib\\auth\\middleware.py", + "ast_data": "ClassDef name:PersistentRemoteUserMiddleware Assign" + }, + { + "library": "seaborn", + "name": "SemanticMapping", + "source_code": "class SemanticMapping:\n map_type: str | None = None\n levels = None\n lookup_table = None\n\n def __init__(self, plotter):\n self.plotter = plotter\n\n def _check_list_length(self, levels, values, variable):\n message = ''\n if len(levels) > len(values):\n message = ' '.join([f'\\nThe {variable} list has fewer values ({len(values)})', f'than needed ({len(levels)}) and will cycle, which may', 'produce an uninterpretable plot.'])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n elif len(values) > len(levels):\n message = ' '.join([f'The {variable} list has more values ({len(values)})', f'than needed ({len(levels)}), which may not be intended.'])\n values = values[:len(levels)]\n if message:\n warnings.warn(message, UserWarning, stacklevel=6)\n return values\n\n def _lookup_single(self, key):\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)", + "docstring": "Base class for mapping data values to plot attributes.", + "type": "class", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "ClassDef name:SemanticMapping Assign Assign FunctionDef name:__init__ arg:self arg:plotter arguments arg arg Assign FunctionDef name:_check_list_length arg:self arg:levels arg:values arg:variable arguments arg arg arg arg Assign If Compare Call Call Assign Call Call Call Assign Call Call If Compare Call Call Assign Call Call Call Assign Call If Call Return return:yes FunctionDef name:_lookup_single arg:self arg:key arguments arg arg Return return:yes FunctionDef name:__call__ arg:self arg:key arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ArgsKwargsPair", + "source_code": "@compatibility(is_backward_compatible=False)\nclass ArgsKwargsPair(NamedTuple):\n args: tuple[Any, ...]\n kwargs: dict[str, Any]", + "docstring": "Simple named tuple for wrapping args/kwargs pairs.", + "type": "class", + "file_path": "pytorch\\torch\\fx\\operator_schemas.py", + "ast_data": "ClassDef name:ArgsKwargsPair Call" + }, + { + "library": "scipy", + "name": "Decanomial", + "source_code": "class Decanomial(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.custom_bounds = [(0, 2.5), (-2, -4)]\n self.global_optimum = [[2.0, -3.0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n val = x[1] ** 4 + 12 * x[1] ** 3 + 54 * x[1] ** 2 + 108 * x[1] + 81.0\n val2 = x[0] ** 10.0 - 20 * x[0] ** 9 + 180 * x[0] ** 8 - 960 * x[0] ** 7\n val2 += 3360 * x[0] ** 6 - 8064 * x[0] ** 5 + 13340 * x[0] ** 4\n val2 += -15360 * x[0] ** 3 + 11520 * x[0] ** 2 - 5120 * x[0] + 2624\n return 0.001 * (abs(val) + abs(val2)) ** 2.0", + "docstring": "Decanomial objective function. This class defines the Decanomial function global optimization problem. This is a multimodal minimization problem defined as follows: .. 
math:: f_{\\text{Decanomial}}(x) = 0.001 \\left(\\lvert{x_{2}^{4} + 12 x_{2}^{3} + 54 x_{2}^{2} + 108 x_{2} + 81.0}\\rvert + \\lvert{x_{1}^{10} - 20 x_{1}^{9} + 180 x_{1}^{8} - 960 x_{1}^{7} + 3360 x_{1}^{6} - 8064 x_{1}^{5} + 13340 x_{1}^{4} - 15360 x_{1}^{3} + 11520 x_{1}^{2} - 5120 x_{1} + 2624.0}\\rvert\\right)^{2} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py", + "ast_data": "ClassDef name:Decanomial FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_has_precomputed_nrows", + "source_code": "def _has_precomputed_nrows(self):\n return self._nrows is not None", + "docstring": "Returns true if has already been computed. If true, then will return its value without calling any TensorFlow ops.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:_has_precomputed_nrows arg:self arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "_wrap_user_constructor", + "source_code": "def _wrap_user_constructor(cls):\n user_constructor = cls.__init__\n\n def wrapped_init(self, *args, **kwargs):\n self.__dict__[_IN_CONSTRUCTOR] = True\n user_constructor(self, *args, **kwargs)\n del self.__dict__[_IN_CONSTRUCTOR]\n self._tf_extension_type_convert_fields()\n self.__validate__()\n cls.__init__ = tf_decorator.make_decorator(user_constructor, wrapped_init)", + "docstring": "Wraps a user-defined constructor for tf.ExtensionType subclass .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", + "ast_data": "FunctionDef name:_wrap_user_constructor arg:cls arguments arg Assign FunctionDef name:wrapped_init arg:self arguments arg arg arg Assign Call Call Call Assign Call" + }, + { + "library": "numpy", + "name": "shape", + "source_code": "@property\ndef shape(self):\n return tuple(((stop - start - 1) // step + 1 for start, stop, step in zip(self.start, self.stop, self.step)))", + "docstring": "The shape of the array to be iterated over. 
For an example, see .", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_arrayterator_impl.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_CacheKeyState", + "source_code": "@dataclass_slots\n@dataclass\nclass _CacheKeyState:\n sym_node_lookup: dict[int, int]\n known_symbols: set[sympy.Symbol]\n shape_env: Optional[ShapeEnv]\n\n def __init__(self, shape_env: Optional[ShapeEnv]=None) -> None:\n self.sym_node_lookup = {}\n self.known_symbols = set()\n self.shape_env = shape_env\n\n def cache_on_shape_env(self) -> bool:\n return bool(self.sym_node_lookup)\n\n def convert_sym_int(self, result: list[object], arg: SymInt) -> None:\n node_id = id(arg.node)\n if node_id in self.sym_node_lookup:\n result.append(_InputBackref(self.sym_node_lookup[node_id]))\n else:\n self.sym_node_lookup[node_id] = len(result)\n self.known_symbols.update(arg.node.expr.free_symbols)\n if self.shape_env is None:\n self.shape_env = arg.node.shape_env\n result.append(_PySymInputStub(arg))\n\n def convert_output(self, arg: _MetadataIntLike) -> _MetadataIntLike:\n if isinstance(arg, SymInt):\n return _SymIntOutputStub(arg, self.sym_node_lookup.get(id(arg.node), None))\n else:\n return arg", + "docstring": "State used while building our cache key.", + "type": "class", + "file_path": "pytorch\\torch\\_subclasses\\_fake_tensor_utils.py", + "ast_data": "ClassDef name:_CacheKeyState FunctionDef name:__init__ arg:self arg:shape_env arguments arg arg Assign Assign Call Assign FunctionDef name:cache_on_shape_env arg:self arguments arg Return return:yes Call FunctionDef name:convert_sym_int arg:self arg:result arg:arg arguments arg arg arg Assign Call If Compare Call Call Assign Call Call If Compare Assign Call Call FunctionDef name:convert_output arg:self arg:arg arguments arg arg If Call Return return:yes Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "ObservationType", + "source_code": "class ObservationType(Enum):\n OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT = 0\n 'this means input and output are observed with different observers, based\\n on qconfig.activation\\n example: conv, linear, softmax\\n '\n OUTPUT_SHARE_OBSERVER_WITH_INPUT = 1\n 'this means the output will use the same observer instance as input, based\\n on qconfig.activation\\n example: torch.cat, maxpool\\n '\n INPUT_OUTPUT_NOT_OBSERVED = 2\n 'this means the input and output are never observed\\n example: x.shape, x.size\\n '", + "docstring": "An enum that represents different ways of how an operator/operator pattern should be observed", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", + "ast_data": "ClassDef name:ObservationType Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "get_config", + "source_code": "def get_config():\n return _get_threadlocal_config().copy()", + "docstring": "Retrieve current values for configuration set by :func:. Returns ------- config : dict Keys are parameter names that can be passed to :func:. See Also -------- config_context : Context manager for global scikit-learn configuration. set_config : Set global scikit-learn configuration. 
Examples -------- >>> import sklearn >>> config = sklearn.get_config() >>> config.keys() dict_keys([...])", + "type": "function", + "file_path": "scikit-learn\\sklearn\\_config.py", + "ast_data": "FunctionDef name:get_config arguments Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "batch_scatter_update", + "source_code": "@tf_export(v1=['batch_scatter_update'])\n@deprecation.deprecated('2018-11-29', 'Use the batch_scatter_update method of Variable instead.')\ndef batch_scatter_update(ref, indices, updates, use_locking=True, name=None):\n with ops.name_scope(name):\n indices = ops.convert_to_tensor(indices, name='indices')\n indices_shape = array_ops.shape(indices)\n indices_dimensions = indices.get_shape().ndims\n if indices_dimensions is None:\n raise ValueError('batch_gather does not allow indices with unknown shape.')\n nd_indices = array_ops.expand_dims(indices, axis=-1)\n nd_indices_list = []\n for dimension in range(indices_dimensions - 1):\n dimension_size = indices_shape[dimension]\n shape_to_broadcast = [1] * (indices_dimensions + 1)\n shape_to_broadcast[dimension] = dimension_size\n dimension_range = array_ops.reshape(gen_math_ops._range(0, dimension_size, 1), shape_to_broadcast)\n if dimension_range.dtype.base_dtype != nd_indices.dtype:\n dimension_range = gen_math_ops.cast(dimension_range, nd_indices.dtype)\n nd_indices_list.append(dimension_range * array_ops.ones_like(nd_indices))\n nd_indices_list.append(nd_indices)\n final_indices = array_ops.concat(nd_indices_list, axis=-1)\n return scatter_nd_update(ref, final_indices, updates, use_locking=use_locking)", + "docstring": "Generalization of to axis different than 0. Analogous to . This assumes that , and have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: where And the operation performed can be expressed as: When indices is a 1D tensor, this operation is equivalent to . To avoid this operation there would be 2 alternatives: 1) Reshaping the variable by merging the first dimensions. However, this is not possible because returns a Tensor, which we cannot use on. 2) Looping over the first of the variable and using on the subtensors that result of slicing the first dimension. This is a valid option for , but less efficient than this implementation. See also and . Args: ref: to scatter onto. indices: Tensor containing indices as described above. updates: Tensor of updates to apply to . use_locking: Boolean indicating whether to lock the writing operation. name: Optional scope name string. Returns: Ref to after it has been modified. 
Raises: ValueError: If the initial of , , and are not the same.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py", + "ast_data": "FunctionDef name:batch_scatter_update arg:ref arg:indices arg:updates arg:use_locking arg:name arguments arg arg arg arg arg With Call Assign Call Assign Call Assign Call If Compare Raise Call Assign Call Assign For Call Assign Assign Assign Assign Call Call If Compare Assign Call Call Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "_canary_import", + "source_code": "def _canary_import(self) -> None:\n source_cmds: set[str] = set()\n for w in self._work_items:\n if w.source_cmd is not None:\n source_cmds.add(f'{w.source_cmd} && ')\n for source_cmd in source_cmds or {''}:\n cmd = f'{source_cmd}{PYTHON_CMD} -c \"import torch\"'\n proc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', executable=SHELL)\n if proc.returncode:\n raise ImportError(f'Failed to import torch in subprocess: {cmd}\\n{proc.stdout}')", + "docstring": "Make sure we can import torch before launching a slew of workers.", + "type": "method", + "file_path": "pytorch\\benchmarks\\instruction_counts\\execution\\runner.py", + "ast_data": "FunctionDef name:_canary_import arg:self arguments arg Call For If Compare Call For BoolOp Assign Assign Call If Raise Call" + }, + { + "library": "pytorch", + "name": "CompiledFxGraphConstants", + "source_code": "class CompiledFxGraphConstants:\n\n def unwrap(self, g: CompiledFxGraph) -> dict[str, torch.Tensor]:\n assert g.constants is not None\n return g.constants", + "docstring": "Wrapper class that unwraps constants from a compiled fx graph. This version of the class only supports directly grabbing the saved constants off of a CompiledFxGraph. With freezing, FxGraphCache doesn't store the constants of the input GraphModule it gets from AOTAutograd. Instead, it saves just the **names** of those constants, and grabs the constant values directly from the graph module passed in at runtime. Thing is, we don't always *have* the graph module available at runtime, hence the existence of this class and its CompiledFxGraphConstantsWithGm counterpart. To support freezing, FXGraphCache gets passed a CompiledFxGraphConstantsWithGm during post compile. Otherwise, CompiledFxGraphConstants supports the basic case of loading the value of constants directly off of the original saved object.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\output_code.py", + "ast_data": "ClassDef name:CompiledFxGraphConstants FunctionDef name:unwrap arg:self arg:g arguments arg arg Compare Return return:yes" + }, + { + "library": "numpy", + "name": "_vander_nd", + "source_code": "def _vander_nd(vander_fs, points, degrees):\n n_dims = len(vander_fs)\n if n_dims != len(points):\n raise ValueError(f'Expected {n_dims} dimensions of sample points, got {len(points)}')\n if n_dims != len(degrees):\n raise ValueError(f'Expected {n_dims} dimensions of degrees, got {len(degrees)}')\n if n_dims == 0:\n raise ValueError('Unable to guess a dtype or shape when no points are given')\n points = tuple(np.asarray(tuple(points)) + 0.0)\n vander_arrays = (vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)] for i in range(n_dims))\n return functools.reduce(operator.mul, vander_arrays)", + "docstring": "A generalization of the Vandermonde matrix for N dimensions The result is built by combining the results of 1d Vandermonde matrices, .. 
math:: W[i_0, \\ldots, i_M, j_0, \\ldots, j_N] = \\prod_{k=0}^N{V_k(x_k)[i_0, \\ldots, i_M, j_k]} where .. math:: N &= \\texttt{len(points)} = \\texttt{len(degrees)} = \\texttt{len(vander\\_fs)} \\\\ M &= \\texttt{points[k].ndim} \\\\ V_k &= \\texttt{vander\\_fs[k]} \\\\ x_k &= \\texttt{points[k]} \\\\ 0 \\le j_k &\\le \\texttt{degrees[k]} Expanding the one-dimensional :math: functions gives: .. math:: W[i_0, \\ldots, i_M, j_0, \\ldots, j_N] = \\prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \\ldots, i_M])} where :math: is the m'th basis of the polynomial construction used along dimension :math:. For a regular polynomial, :math:. Parameters ---------- vander_fs : Sequence[function(array_like, int) -> ndarray] The 1d vander function to use for each axis, such as `vander_fsvander_fs`.", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polyutils.py", + "ast_data": "FunctionDef name:_vander_nd arg:vander_fs arg:points arg:degrees arguments arg arg arg Assign Call If Compare Call Raise Call Call If Compare Call Raise Call Call If Compare Raise Call Assign Call Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_MaximumGrad", + "source_code": "@ops.RegisterGradient('Maximum')\ndef _MaximumGrad(op: ops.Operation, grad):\n return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)", + "docstring": "Returns grad*(x >= y, x < y) with type of grad.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_MaximumGrad arg:op arg:grad arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, proto, *, proto_as_initial_chunk: bool=True, parent_splitter: Optional['ComposableSplitter']=None, fields_in_parent: Optional[util.FieldTypes]=None):\n self._proto = proto\n self._parent_splitter = parent_splitter\n self._fields_in_parent = fields_in_parent\n self._built = False\n self._add_chunk_order = []\n self._fix_chunk_order = False\n if parent_splitter is not None:\n self._chunks = None\n self._chunked_message = None\n elif proto_as_initial_chunk:\n self._chunks = [self._proto]\n self._chunked_message = chunk_pb2.ChunkedMessage(chunk_index=0)\n self._add_chunk_order.append(id(self._proto))\n else:\n self._chunks = []\n self._chunked_message = chunk_pb2.ChunkedMessage()", + "docstring": "Initializes ComposableSplitter. Args: proto: Proto message to split. proto_as_initial_chunk: Whether to initialize chunks with the user-provided proto as the initial chunk. parent_splitter: The parent object. 
fields_in_parent: Fields to access from the parent splitter's proto.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:proto arguments arg arg arg arg arg Assign Assign Assign Assign Assign Assign If Compare Assign Assign If Assign Assign Call Call Call Assign Assign Call" + }, + { + "library": "pytorch", + "name": "_maybe_set_opset_version", + "source_code": "def _maybe_set_opset_version(opset_imports: dict[str, int], domain: str, version: int | None) -> None:\n if domain in opset_imports and opset_imports[domain] != 1:\n return\n if domain == _ONNX_DOMAIN:\n opset_imports[domain] = _constants.TORCHLIB_OPSET\n return\n if version is None:\n opset_imports[domain] = 1\n return\n opset_imports[domain] = version", + "docstring": "Set the opset version for the domain.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_ir_passes.py", + "ast_data": "FunctionDef name:_maybe_set_opset_version arg:opset_imports arg:domain arg:version arguments arg arg arg If BoolOp Compare Compare Return return:no If Compare Assign Return return:no If Compare Assign Return return:no Assign" + }, + { + "library": "pytorch", + "name": "_infer_ep_from_device", + "source_code": "def _infer_ep_from_device(*args) -> tuple[str, ...]:\n eps = []\n for arg in args:\n if hasattr(arg, 'device'):\n device = arg.device\n if device.type == 'cuda':\n eps.append('CUDAExecutionProvider')\n elif device.type == 'cpu':\n eps.append('CPUExecutionProvider')\n return tuple(eps)", + "docstring": "Return the first valid device (i.e., GPU or CPU) in argument list.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py", + "ast_data": "FunctionDef name:_infer_ep_from_device arguments arg Assign For If Call Assign If Compare Call If Compare Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_form", + "source_code": "def get_form(self, request, obj=None, **kwargs):\n defaults = {}\n if obj is None:\n defaults['form'] = self.add_form\n defaults.update(kwargs)\n return super().get_form(request, obj, **defaults)", + "docstring": "Use special form during user creation", + "type": "method", + "file_path": "django\\django\\contrib\\auth\\admin.py", + "ast_data": "FunctionDef name:get_form arg:self arg:request arg:obj arguments arg arg arg arg Assign If Compare Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "schedule_comm_wait", + "source_code": "def schedule_comm_wait(graph: fx.Graph) -> None:\n ops = (torch.ops._c10d_functional.all_reduce_.default, torch.ops._c10d_functional.all_reduce.default, torch.ops._c10d_functional.all_reduce_coalesced.default, torch.ops._c10d_functional.all_reduce_coalesced_.default)\n comm_blocks = get_all_comm_blocks(graph, ops)\n if not comm_blocks:\n return\n allreduce_users = OrderedSet[fx.Node]()\n for allreduce in comm_blocks:\n for output in allreduce.outputs:\n allreduce_users.update(output.users)\n node_indices = {node: i for i, node in enumerate(graph.nodes)}\n for allreduce in comm_blocks:\n assert len(allreduce.outputs) >= 1, f'Found a allreduce that has zero outputs/users -- {allreduce}.'\n target_node = next(iter(next(iter(allreduce.outputs)).users))\n target_node_index = 2 ** 31\n for user in (user for output in allreduce.outputs for user in output.users):\n index = node_indices[user]\n if index < target_node_index:\n target_node = user\n target_node_index = index\n wait_idx = -1\n for 
wait_idx, node in enumerate(allreduce.node_list):\n if node == allreduce.wait_nodes[0]:\n break\n assert wait_idx >= 0\n move_block_before(allreduce.node_list[wait_idx:], target_node)", + "docstring": "Delay the execution of wait tensors of allreduce until its first user. This algorithm considers the intermediate users, like split, getitem, of the wait node and schedule those intermediate users as well. This will result in a better overlapping result.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\ddp_fusion.py", + "ast_data": "FunctionDef name:schedule_comm_wait arg:graph arguments arg Assign Assign Call If Return return:no Assign Call For For Call Assign Call For Compare Call Assign Call Call Call Call Assign For Assign If Compare Assign Assign Assign For Call If Compare Compare Call" + }, + { + "library": "pytorch", + "name": "forward", + "source_code": "def forward(self, input: Tensor, output_size: Optional[list[int]]=None) -> Tensor:\n if self.padding_mode != 'zeros':\n raise ValueError('Only `zeros` padding mode is supported for ConvTranspose2d')\n assert isinstance(self.padding, tuple)\n num_spatial_dims = 2\n output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size, num_spatial_dims, self.dilation)\n return F.conv_transpose2d(input, self.weight, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)", + "docstring": "Performs the forward pass. Attributes: input (Tensor): The input tensor. output_size (list[int], optional): A list of integers representing the size of the output tensor. Default is None.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\conv.py", + "ast_data": "FunctionDef name:forward arg:self arg:input arg:output_size arguments arg arg arg If Compare Raise Call Call Assign Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "boundary", + "source_code": "@property\ndef boundary(self):\n return self._geomgen(capi.get_boundary)", + "docstring": "Return the boundary of this geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:boundary arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_offset_text", + "source_code": "def get_offset_text(self):\n return self.offsetText", + "docstring": "Return the axis offsetText as a Text instance.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_offset_text arg:self arguments arg Return return:yes" + }, + { + "library": "sphinx", + "name": "textwidth", + "source_code": "def textwidth(text: str, widechars: str='WF') -> int:\n\n def charwidth(char: str, widechars: str) -> int:\n if east_asian_width(char) in widechars:\n return 2\n else:\n return 1\n return sum((charwidth(c, widechars) for c in text))", + "docstring": "Get width of text.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\rst.py", + "ast_data": "FunctionDef name:textwidth arg:text arg:widechars arguments arg arg FunctionDef name:charwidth arg:char arg:widechars arguments arg arg If Compare Call Return return:yes Return return:yes Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "tzinfo", + "source_code": "@property\ndef tzinfo(self) -> tzinfo | None:\n return self.tz", + "docstring": "Alias for tz attribute", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py", + "ast_data": 
"FunctionDef name:tzinfo arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "pdf_from_cf_with_fft", + "source_code": "def pdf_from_cf_with_fft(cf, h=0.01, q=9, level=3):\n n = level\n N = 2 ** q\n steps = np.arange(0, N)\n L = N * h / 2\n x_l = np.pi * (steps - N / 2) / L\n if level > 1:\n indices = np.arange(n).reshape(n, 1)\n s1 = np.sum((-1) ** steps * Cotes[n, indices] * np.fft.fft((-1) ** steps * cf(-L + h * steps + h * indices / (n - 1))) * np.exp(1j * np.pi * indices / (n - 1) - 2 * 1j * np.pi * indices * steps / (N * (n - 1))), axis=0)\n else:\n s1 = (-1) ** steps * Cotes[n, 0] * np.fft.fft((-1) ** steps * cf(-L + h * steps))\n density = h * s1 / (2 * np.pi * np.sum(Cotes[n]))\n return (x_l, density)", + "docstring": "Calculates pdf from characteristic function. Uses fast Fourier transform with Newton-Cotes integration following [WZ]. Defaults to using Simpson's method (3-point Newton-Cotes integration). Parameters ---------- cf : callable Single argument function from float -> complex expressing a characteristic function for some distribution. h : Optional[float] Step size for Newton-Cotes integration. Default: 0.01 q : Optional[int] Use 2**q steps when performing Newton-Cotes integration. The infinite integral in the inverse Fourier transform will then be restricted to the interval [-2**q * h / 2, 2**q * h / 2]. Setting the number of steps equal to a power of 2 allows the fft to be calculated in O(n*log(n)) time rather than O(n**2). Default: 9 level : Optional[int] Calculate integral using n-point Newton-Cotes integration for n = level. The 3-point Newton-Cotes formula corresponds to Simpson's rule. Default: 3 Returns ------- x_l : ndarray Array of points x at which pdf is estimated. 2**q equally spaced points from -pi/h up to but not including pi/h. density : ndarray Estimated values of pdf corresponding to cf at points in x_l. References ---------- .. [WZ] Wang, Li and Zhang, Ji-Hong, 2008. Simpson's rule based FFT method to compute densities of stable distribution.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_levy_stable\\__init__.py", + "ast_data": "FunctionDef name:pdf_from_cf_with_fft arg:cf arg:h arg:q arg:level arguments arg arg arg arg Assign Assign Assign Call Assign Assign If Compare Assign Call Call Assign Call Call Call Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "TestEnvironment", + "source_code": "class TestEnvironment(object):\n\n def __init__(self):\n self.tf_data_service_dispatcher = None\n self.total_phsyical_gpus = None\n\n def __setattr__(self, name, value):\n if not in_main_process():\n raise ValueError('combinations.env() should only be modified in the main process. Condition your code on combinations.in_main_process().')\n super().__setattr__(name, value)", + "docstring": "Holds the test environment information. 
Tests should modify the attributes of the instance returned by in the main process if needed, and it will be passed to the worker processes each time a test case is run.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py", + "ast_data": "ClassDef name:TestEnvironment FunctionDef name:__init__ arg:self arguments arg Assign Assign FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Call Raise Call Call Call" + }, + { + "library": "tensorflow", + "name": "concatenate", + "source_code": "def concatenate(self, dataset, name=None) -> 'DatasetV2':\n from tensorflow.python.data.ops import concatenate_op\n return concatenate_op._concatenate(self, dataset, name)", + "docstring": "Creates a by concatenating the given dataset with this dataset. >>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ] >>> b = tf.data.Dataset.range(4, 8) # ==> [ 4, 5, 6, 7 ] >>> ds = a.concatenate(b) >>> [a.item() for a in ds.as_numpy_iterator()] [1, 2, 3, 4, 5, 6, 7] >>> # The input dataset and dataset to be concatenated should have >>> # compatible element specs. >>> c = tf.data.Dataset.zip((a, b)) >>> a.concatenate(c) Traceback (most recent call last): TypeError: Two datasets to concatenate have different types and (tf.int64, tf.int64) >>> d = tf.data.Dataset.from_tensor_slices([\"a\", \"b\", \"c\"]) >>> a.concatenate(d) Traceback (most recent call last): TypeError: Two datasets to concatenate have different types and Args: dataset: to be concatenated. name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:concatenate arg:self arg:dataset arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_clone_helper", + "source_code": "def _clone_helper(op_to_clone, variant_tensor_ops):\n remap_dict = {}\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in variant_tensor_ops:\n recursive_map = _clone_helper(input_tensor_op, variant_tensor_ops)\n remap_dict.update(recursive_map)\n inputs_list = []\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in remap_dict:\n remapped_input = remap_dict[input_tensor_op].outputs[0]\n inputs_list.append(remapped_input)\n else:\n inputs_list.append(input_tensor_op.outputs[input_tensor.value_index])\n g = ops.get_default_graph()\n new_op = g.create_op(op_to_clone.type, inputs_list, [o.dtype for o in op_to_clone.outputs], name=op_to_clone.name, attrs=op_to_clone.node_def.attr, op_def=_get_op_def(op_to_clone))\n remap_dict[op_to_clone] = new_op\n return remap_dict", + "docstring": "Helper method that recursively clones . Args: op_to_clone: The op we want to clone. variant_tensor_ops: A list of ops that we have to clone along the way. Returns: A dictionary mapping old_ops to new_ops created. 
Includes op_to_clone as a key.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_ops.py", + "ast_data": "FunctionDef name:_clone_helper arg:op_to_clone arg:variant_tensor_ops arguments arg arg Assign For Assign If Compare Assign Call Call Assign For Assign If Compare Assign Call Call Assign Call Assign Call Call Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "__getitem__", + "source_code": "def __getitem__(self, key):\n if not self.loaded:\n self.load()\n return self._data[key]", + "docstring": "Retrieve a session-stored object.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, reader_ref, supports_serialize=False):\n if context.executing_eagerly():\n raise RuntimeError('Readers are not supported when eager execution is enabled. Instead, please use tf.data to get data into your model.')\n self._reader_ref = reader_ref\n self._supports_serialize = supports_serialize", + "docstring": "Creates a new ReaderBase. Args: reader_ref: The operation that implements the reader. supports_serialize: True if the reader implementation can serialize its state. Raises: RuntimeError: If eager execution is enabled.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:reader_ref arg:supports_serialize arguments arg arg arg If Call Raise Call Assign Assign" + }, + { + "library": "authlib", + "name": "register_algorithm", + "source_code": "@classmethod\ndef register_algorithm(cls, algorithm):\n if not algorithm or algorithm.algorithm_type != 'JWE':\n raise ValueError(f'Invalid algorithm for JWE, {algorithm!r}')\n if algorithm.algorithm_location == 'alg':\n cls.ALG_REGISTRY[algorithm.name] = algorithm\n elif algorithm.algorithm_location == 'enc':\n cls.ENC_REGISTRY[algorithm.name] = algorithm\n elif algorithm.algorithm_location == 'zip':\n cls.ZIP_REGISTRY[algorithm.name] = algorithm", + "docstring": "Register an algorithm for `` of JWE.", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7516\\jwe.py", + "ast_data": "FunctionDef name:register_algorithm arg:cls arg:algorithm arguments arg arg If BoolOp Compare Raise Call If Compare Assign If Compare Assign If Compare Assign" + }, + { + "library": "pytorch", + "name": "interpolate", + "source_code": "def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):\n if not input.is_quantized:\n raise ValueError(\"Input to 'quantized.interpolate' must be quantized!\")\n return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)", + "docstring": "Down/up samples the input to either the given :attr: or the given :attr: See :func: for implementation details. The input dimensions are interpreted in the form: . .. note:: The input quantization parameters propagate to the output. .. note:: Only 2D/3D input is supported for quantized inputs .. note:: Only the following modes are supported for the quantized inputs: - - Args: input (Tensor): the input tensor size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]): output spatial size. scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple. 
mode (str): algorithm used for upsampling: `scale_factormode`", + "type": "function", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", + "ast_data": "FunctionDef name:interpolate arg:input arg:size arg:scale_factor arg:mode arg:align_corners arguments arg arg arg arg arg If Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_grey", + "source_code": "@classmethod\ndef get_grey(cls, tex, fontsize=None, dpi=None):\n fontsize = mpl._val_or_rc(fontsize, 'font.size')\n dpi = mpl._val_or_rc(dpi, 'savefig.dpi')\n key = (cls._get_tex_source(tex, fontsize), dpi)\n alpha = cls._grey_arrayd.get(key)\n if alpha is None:\n pngfile = cls.make_png(tex, fontsize, dpi)\n rgba = mpl.image.imread(os.path.join(cls._texcache, pngfile))\n cls._grey_arrayd[key] = alpha = rgba[:, :, -1]\n return alpha", + "docstring": "Return the alpha channel.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py", + "ast_data": "FunctionDef name:get_grey arg:cls arg:tex arg:fontsize arg:dpi arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_in_multi_worker_mode", + "source_code": "def _in_multi_worker_mode(self):\n return False", + "docstring": "Whether this strategy indicates working in multi-worker settings.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "align_titles", + "source_code": "def align_titles(self, axs=None):\n if axs is None:\n axs = self.axes\n axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]\n for ax in axs:\n _log.debug(' Working on: %s', ax.get_title())\n rowspan = ax.get_subplotspec().rowspan\n for axc in axs:\n rowspanc = axc.get_subplotspec().rowspan\n if rowspan.start == rowspanc.start:\n self._align_label_groups['title'].join(ax, axc)", + "docstring": "Align the titles of subplots in the same subplot row if title alignment is being done automatically (i.e. the title position is not manually set). Alignment persists for draw events after this is called. Parameters ---------- axs : list of Optional list of (or ndarray) to align the titles. Default is to align all Axes on the figure. See Also -------- matplotlib.figure.Figure.align_xlabels matplotlib.figure.Figure.align_ylabels matplotlib.figure.Figure.align_labels Notes ----- This assumes that all Axes in `.GridSpec.SubplotSpec` positions correspond to figure positions. Examples -------- Example with titles:: fig, axs = plt.subplots(1, 2) axs[0].set_aspect('equal') axs[0].set_title('Title 0') axs[1].set_title('Title 1') fig.align_titles()", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:align_titles arg:self arg:axs arguments arg arg If Compare Assign Assign Call Compare Call For Call Call Assign Call For Assign Call If Compare Call" + }, + { + "library": "tensorflow", + "name": "_parse_type_to_int", + "source_code": "def _parse_type_to_int(dtype, flag):\n if dtype not in mmi_constants.TFLITE_TYPES:\n raise ValueError(\"Unsupported value '{0}' for {1}. 
Only {2} are supported.\".format(dtype, flag, mmi_constants.TFLITE_TYPES))\n dtype_str = mmi_constants.TFLITE_TO_STR_TYPES[dtype]\n dtype_int = schema_fb.TensorType.__dict__[dtype_str]\n return dtype_int", + "docstring": "Converts a tflite type to it's integer representation. Args: dtype: tf.DType representing the inference type. flag: str representing the flag name. Returns: integer, a tflite TensorType enum value. Raises: ValueError: Unsupported tflite type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\python\\modify_model_interface_lib.py", + "ast_data": "FunctionDef name:_parse_type_to_int arg:dtype arg:flag arguments arg arg If Compare Raise Call Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "random", + "source_code": "@staticmethod\ndef random(seed=None, rerandomize_each_iteration=None, name=None) -> 'DatasetV2':\n from tensorflow.python.data.ops import random_op\n return random_op._random(seed=seed, rerandomize_each_iteration=rerandomize_each_iteration, name=name)", + "docstring": "Creates a of pseudorandom values. The dataset generates a sequence of uniformly distributed integer values. controls whether the sequence of random number generated should be re-randomized for each epoch. The default value is False where the dataset generates the same sequence of random numbers for each epoch. >>> ds1 = tf.data.Dataset.random(seed=4).take(10) >>> ds2 = tf.data.Dataset.random(seed=4).take(10) >>> print(list(ds1.as_numpy_iterator())==list(ds2.as_numpy_iterator())) True >>> ds3 = tf.data.Dataset.random(seed=4).take(10) >>> ds3_first_epoch = list(ds3.as_numpy_iterator()) >>> ds3_second_epoch = list(ds3.as_numpy_iterator()) >>> print(ds3_first_epoch == ds3_second_epoch) True >>> ds4 = tf.data.Dataset.random( ... seed=4, rerandomize_each_iteration=True).take(10) >>> ds4_first_epoch = list(ds4.as_numpy_iterator()) >>> ds4_second_epoch = list(ds4.as_numpy_iterator()) >>> print(ds4_first_epoch == ds4_second_epoch) False Args: seed: (Optional) If specified, the dataset produces a deterministic sequence of values. rerandomize_each_iteration: (Optional) If set to False, the dataset generates the same sequence of random numbers for each epoch. If set to True, it generates a different deterministic sequence of random numbers for each epoch. It is defaulted to False if left unspecified. name: (Optional.) A name for the tf.data operation. 
Returns: Dataset: A .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:random arg:seed arg:rerandomize_each_iteration arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "ASTParenAttribute", + "source_code": "class ASTParenAttribute(ASTAttribute):\n\n def __init__(self, id: str, arg: str) -> None:\n self.id = id\n self.arg = arg\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, ASTParenAttribute):\n return NotImplemented\n return self.id == other.id and self.arg == other.arg\n\n def __hash__(self) -> int:\n return hash((self.id, self.arg))\n\n def _stringify(self, transform: StringifyTransform) -> str:\n return f'{self.id}({self.arg})'\n\n def describe_signature(self, signode: TextElement) -> None:\n signode.append(nodes.Text(str(self)))", + "docstring": "For paren attributes defined by the user.", + "type": "class", + "file_path": "sphinx\\sphinx\\util\\cfamily.py", + "ast_data": "ClassDef name:ASTParenAttribute FunctionDef name:__init__ arg:self arg:id arg:arg arguments arg arg arg Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes BoolOp Compare Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:_stringify arg:self arg:transform arguments arg arg Return return:yes FunctionDef name:describe_signature arg:self arg:signode arguments arg arg Call Call Call" + }, + { + "library": "matplotlib", + "name": "intersects_bbox", + "source_code": "def intersects_bbox(self, bbox, filled=True):\n return _path.path_intersects_rectangle(self, bbox.x0, bbox.y0, bbox.x1, bbox.y1, filled)", + "docstring": "Return whether this path intersects a given . If *filled* is True, then this also returns True if the path completely encloses the (i.e., the path is treated as filled). The bounding box is always considered filled.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\path.py", + "ast_data": "FunctionDef name:intersects_bbox arg:self arg:bbox arg:filled arguments arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "join", + "source_code": "def join(self, join, reuse=None):\n reuse_aliases = [a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join]\n if reuse_aliases:\n if join.table_alias in reuse_aliases:\n reuse_alias = join.table_alias\n else:\n reuse_alias = reuse_aliases[-1]\n self.ref_alias(reuse_alias)\n return reuse_alias\n alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)\n if join.join_type:\n if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:\n join_type = LOUTER\n else:\n join_type = INNER\n join.join_type = join_type\n join.table_alias = alias\n self.alias_map[alias] = join\n if (filtered_relation := join.filtered_relation):\n resolve_reuse = reuse\n if resolve_reuse is not None:\n resolve_reuse = set(reuse) | {alias}\n joins_len = len(self.alias_map)\n join.filtered_relation = filtered_relation.resolve_expression(self, reuse=resolve_reuse)\n if joins_len < len(self.alias_map):\n self.alias_map[alias] = self.alias_map.pop(alias)\n return alias", + "docstring": "Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a base_table_class or join_class. 
The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:join arg:self arg:join arg:reuse arguments arg arg arg Assign Call BoolOp BoolOp Compare Compare Compare If If Compare Assign Assign Call Return return:yes Assign Call If If BoolOp Compare Assign Assign Assign Assign Assign If Assign If Compare Assign Call Assign Call Assign Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y):\n if not hasattr(self, 'coef_'):\n self._more_validate_params(for_partial_fit=True)\n lr = 'pa1' if self.loss == 'epsilon_insensitive' else 'pa2'\n return self._partial_fit(X, y, alpha=1.0, C=self.C, loss='epsilon_insensitive', learning_rate=lr, max_iter=1, sample_weight=None, coef_init=None, intercept_init=None)", + "docstring": "Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of training data. y : numpy array of shape [n_samples] Subset of target values. Returns ------- self : object Fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_passive_aggressive.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arguments arg arg arg If Call Call Assign Compare Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "draw_base2", + "source_code": "def draw_base2(self, m: int, out: Optional[torch.Tensor]=None, dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n n = 2 ** m\n total_n = self.num_generated + n\n if not total_n & total_n - 1 == 0:\n raise ValueError(f\"The balance properties of Sobol' points require n to be a power of 2. {self.num_generated} points have been previously generated, then: n={self.num_generated}+2**{m}={total_n}. If you still want to do this, please use 'SobolEngine.draw()' instead.\")\n return self.draw(n=n, out=out, dtype=dtype)", + "docstring": "Function to draw a sequence of :attr: points from a Sobol sequence. Note that the samples are dependent on the previous samples. The size of the result is :math:. Args: m (Int): The (base2) exponent of the number of points to draw. out (Tensor, optional): The output tensor dtype (:class:, optional): the desired data type of the returned tensor. 
Default: ``", + "type": "method", + "file_path": "pytorch\\torch\\quasirandom.py", + "ast_data": "FunctionDef name:draw_base2 arg:self arg:m arg:out arg:dtype arguments arg arg arg arg Assign Assign If Compare Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "nodes_filter", + "source_code": "def nodes_filter(nodes: list[torch.fx.Node], node_call_back) -> list[torch.fx.Node]:\n return [node for node in nodes if node_call_back(node)]", + "docstring": "Returns the nodes that match the node_call_back as a list.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\utils.py", + "ast_data": "FunctionDef name:nodes_filter arg:nodes arg:node_call_back arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "reduce_all", + "source_code": "@dispatch.dispatch_for_api(math_ops.reduce_all)\ndef reduce_all(input_tensor: ragged_tensor.Ragged, axis=None, keepdims=None, name=None):\n with ops.name_scope(name, 'RaggedReduceAll', [input_tensor, axis]):\n return _cast(reduce_prod(_cast(input_tensor, dtypes.int32), axis, keepdims), dtypes.bool)", + "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py", + "ast_data": "FunctionDef name:reduce_all arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg With Call Return return:yes Call Call Call Call" + }, + { + "library": "pygame", + "name": "_clip_and_draw_aaline", + "source_code": "def _clip_and_draw_aaline(surf, rect, color, line, blend):\n if not clip_line(line, BoundingBox(rect.x - 1, rect.y - 1, rect.x + rect.w, rect.y + rect.h), use_float=True):\n return\n _draw_aaline(surf, color, Point(line[0], line[1]), Point(line[2], line[3]), blend)\n return", + "docstring": "draw anti-aliased line between two endpoints.", + "type": "function", + "file_path": "pygame\\src_py\\draw_py.py", + "ast_data": "FunctionDef name:_clip_and_draw_aaline arg:surf arg:rect arg:color arg:line arg:blend arguments arg arg arg arg arg If Call Call Return return:no Call Call Call Return return:no" + }, + { + "library": "numpy", + "name": "_hist_bin_rice", + "source_code": "def _hist_bin_rice(x, range):\n del range\n return _ptp(x) / (2.0 * x.size ** (1.0 / 3))", + "docstring": "Rice histogram bin estimator. Another simple estimator with no normality assumption. It has better performance for large data than Sturges, but tends to overestimate the number of bins. The number of bins is proportional to the cube root of data size (asymptotically optimal). The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. 
Returns ------- h : An estimate of the optimal bin width for the given data.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_histograms_impl.py", + "ast_data": "FunctionDef name:_hist_bin_rice arg:x arg:range arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_nvals_uniform_row_length", + "source_code": "def _nvals_uniform_row_length(values, uniform_row_length):\n const_nvals = tensor_shape.dimension_at_index(values.shape, 0).value\n if const_nvals is not None:\n nvals = constant_op.constant(const_nvals, uniform_row_length.dtype)\n elif isinstance(values, RaggedTensor):\n nvals = values.nrows(out_type=uniform_row_length.dtype)\n else:\n nvals = array_ops.shape(values, out_type=uniform_row_length.dtype)[0]\n return nvals", + "docstring": "Get the number of values for uniform row length constructor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:_nvals_uniform_row_length arg:values arg:uniform_row_length arguments arg arg Assign Call If Compare Assign Call If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_run_static_range_qat", + "source_code": "def _run_static_range_qat(src_saved_model_path: str, dst_saved_model_path: str, quant_opts: _QuantizationOptions, signature_def_map: _SignatureDefMap) -> None:\n logging.info('Running static-range quantization for QAT model.')\n pywrap_quantize_model.quantize_qat_model(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quant_opts.SerializeToString(), signature_keys=list(quant_opts.signature_keys), signature_def_map_serialized=_serialize_signature_def_map(signature_def_map), py_function_library=py_function_lib.PyFunctionLibrary())", + "docstring": "Runs static-range quantization for a Quantization-Aware Trained model. Runs the quantization for a model trained using QAT. Args: src_saved_model_path: Path to the source SavedModel directory. dst_saved_model_path: Path to the destination SavedModel directory. quant_opts: Quantization options. signature_def_map: Signature def key -> SignatureDef mapping.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py", + "ast_data": "FunctionDef name:_run_static_range_qat arg:src_saved_model_path arg:dst_saved_model_path arg:quant_opts arg:signature_def_map arguments arg arg arg arg Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "variable_scope", + "source_code": "@property\ndef variable_scope(self):\n return self._variable_scope", + "docstring": "Returns the variable scope object created by this Template.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", + "ast_data": "FunctionDef name:variable_scope arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "BaseTzLoader", + "source_code": "class BaseTzLoader(TimestamptzLoader):\n timezone = None\n\n def load(self, data):\n res = super().load(data)\n return res.replace(tzinfo=self.timezone)", + "docstring": "Load a PostgreSQL timestamptz using the a specific timezone. 
The timezone can be None too, in which case it will be chopped.", + "type": "class", + "file_path": "django\\django\\db\\backends\\postgresql\\psycopg_any.py", + "ast_data": "ClassDef name:BaseTzLoader Assign FunctionDef name:load arg:self arg:data arguments arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "merge_with_descriptors", + "source_code": "def merge_with_descriptors(self, descriptors: Tensor) -> DISKFeatures:\n dtype = descriptors.dtype\n x, y = self.xys.T\n desc = descriptors[:, y, x].T\n desc = F.normalize(desc, dim=-1)\n return DISKFeatures(self.xys.to(dtype), desc, self.detection_logp)", + "docstring": "Select descriptors from a dense tensor, at locations given by .", + "type": "method", + "file_path": "kornia\\kornia\\feature\\disk\\structs.py", + "ast_data": "FunctionDef name:merge_with_descriptors arg:self arg:descriptors arguments arg arg Assign Assign Assign Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "rank", + "source_code": "def rank(self) -> Optional[int]:\n return self._rank", + "docstring": "Returns the rank of remote worker representing the remote device. Returns `` if no rank is available.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\remote_device.py", + "ast_data": "FunctionDef name:rank arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "inside_box_boundaries", + "source_code": "def inside_box_boundaries(x, lb, ub):\n return (lb <= x).all() and (x <= ub).all()", + "docstring": "Check if lb <= x <= ub.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py", + "ast_data": "FunctionDef name:inside_box_boundaries arg:x arg:lb arg:ub arguments arg arg arg Return return:yes BoolOp Call Compare Call Compare" + }, + { + "library": "pytorch", + "name": "join", + "source_code": "def join(self, other: LiveRange):\n return LiveRange(min(self.begin, other.begin), max(self.end, other.end))", + "docstring": "Combine two ranges using a union operation", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py", + "ast_data": "FunctionDef name:join arg:self arg:other arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "aps01_f", + "source_code": "def aps01_f(x):\n return np.sin(x) - x / 2", + "docstring": "Straightforward sum of trigonometric function and polynomial", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:aps01_f arg:x arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "gen_predict_fn_def", + "source_code": "def gen_predict_fn_def(self):\n return 'def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:'", + "docstring": "Generates the definition of the predict function.", + "type": "method", + "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", + "ast_data": "FunctionDef name:gen_predict_fn_def arg:self arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "_izip_records", + "source_code": "def _izip_records(seqarrays, fill_value=None, flatten=True):\n if flatten:\n zipfunc = _izip_fields_flat\n else:\n zipfunc = _izip_fields\n for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):\n yield tuple(zipfunc(tup))", + "docstring": "Returns an iterator of concatenated items from a sequence of arrays. 
Parameters ---------- seqarrays : sequence of arrays Sequence of arrays. fill_value : {None, integer} Value used to pad shorter iterables. flatten : {True, False}, Whether to", + "type": "function", + "file_path": "numpy\\numpy\\lib\\recfunctions.py", + "ast_data": "FunctionDef name:_izip_records arg:seqarrays arg:fill_value arg:flatten arguments arg arg arg If Assign Assign For Call Call Call" + }, + { + "library": "matplotlib", + "name": "set_subplotspec", + "source_code": "def set_subplotspec(self, subplotspec):\n self._subplotspec = subplotspec\n self._set_position(subplotspec.get_position(self.get_figure(root=False)))", + "docstring": "Set the . associated with the subplot.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:set_subplotspec arg:self arg:subplotspec arguments arg arg Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "_get_dependent_variables", + "source_code": "def _get_dependent_variables(input_ops, output_ops):\n output_ops = nest.map_structure(gen_array_ops.identity, output_ops)\n inbetween_ops = op_selector.get_backward_walk_ops(seed_ops=output_ops, stop_at_ts=input_ops, inclusive=False, only_differentiable=True)\n var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES)\n var_names = (op.name for op in var_ops)\n tf_vars = (get_variable_by_name(var_name) for var_name in var_names)\n tf_vars = [v for v in tf_vars if v is not None]\n return tf_vars", + "docstring": "Finds variables involved in the subgraph between input_ops and output_ops. Args: input_ops: Flattened list of input ops output_ops: Flattened list of output ops Returns: A list of variables", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py", + "ast_data": "FunctionDef name:_get_dependent_variables arg:input_ops arg:output_ops arguments arg arg Assign Call Assign Call Assign Compare Assign Assign Call Assign Compare Return return:yes" + }, + { + "library": "kornia", + "name": "_estimate_homography", + "source_code": "def _estimate_homography(self, keypoints1: Tensor, keypoints2: Tensor) -> Tensor:\n if self.estimator == 'vanilla':\n homo = find_homography_dlt_iterated(keypoints2[None], keypoints1[None], torch.ones_like(keypoints1[None, :, 0]))\n elif self.estimator == 'ransac':\n homo, _ = self.ransac(keypoints2, keypoints1)\n homo = homo[None]\n else:\n raise NotImplementedError(f'Unsupported estimator {self.estimator}. Use `ransac` or `vanilla` instead.')\n return homo", + "docstring": "Estimate homography by the matched keypoints. Args: keypoints1: matched keypoint set from an image, shaped as :math:. keypoints2: matched keypoint set from the other image, shaped as :math:.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\image_stitching.py", + "ast_data": "FunctionDef name:_estimate_homography arg:self arg:keypoints1 arg:keypoints2 arguments arg arg arg If Compare Assign Call Call If Compare Assign Call Assign Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "graph_execution_trace_to_tensor_value", + "source_code": "def graph_execution_trace_to_tensor_value(self, trace):\n debug_event = self._reader.read_graph_execution_traces_event(trace.locator)\n return _parse_tensor_value(debug_event.graph_execution_trace.tensor_proto)", + "docstring": "Read full tensor values from an Execution or ExecutionDigest. Args: trace: An or object. 
Returns: A numpy array representing the output tensor value of the intra-graph tensor execution event.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:graph_execution_trace_to_tensor_value arg:self arg:trace arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_prepare_input_for_onnx", + "source_code": "def _prepare_input_for_onnx(args, kwargs, remained_onnx_input_idx: Sequence[int] | None, flatten: bool):\n onnx_inputs = _prepare_input_for_export(args, kwargs)\n if flatten:\n onnx_inputs, _ = torch.jit._flatten(onnx_inputs)\n elif onnx_inputs and onnx_inputs[-1] == {}:\n onnx_inputs = onnx_inputs[:-1]\n if remained_onnx_input_idx is not None:\n return [onnx_inputs[i] for i in remained_onnx_input_idx]\n else:\n return onnx_inputs", + "docstring": "Prepare input for ONNX model execution in ONNX backend. Any future changes/formatting to the input before dispatching to the ONNX backend run should be made in this function. Args: args: positional arguments for PyTorch model forward method. kwargs: keyword arguments for PyTorch model forward method. remained_onnx_input_idx: indices of inputs to be used for ONNX model execution. flatten: whether to flatten the input before dispatching to the ONNX model execution. Returns: onnx_inputs: positional arguments for ONNX model execution in ONNX backend.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:_prepare_input_for_onnx arg:args arg:kwargs arg:remained_onnx_input_idx arg:flatten arguments arg arg arg arg Assign Call If Assign Call If BoolOp Compare Assign If Compare Return return:yes Return return:yes" + }, + { + "library": "seaborn", + "name": "_hue_backcompat", + "source_code": "def _hue_backcompat(self, color, palette, hue_order, force_hue=False):\n default_behavior = color is None or palette is not None\n if force_hue and 'hue' not in self.variables and default_behavior:\n self._redundant_hue = True\n self.plot_data['hue'] = self.plot_data[self.orient]\n self.variables['hue'] = self.variables[self.orient]\n self.var_types['hue'] = 'categorical'\n hue_order = self.var_levels[self.orient]\n if isinstance(palette, dict):\n palette = {str(k): v for k, v in palette.items()}\n else:\n if 'hue' in self.variables:\n redundant = (self.plot_data['hue'] == self.plot_data[self.orient]).all()\n else:\n redundant = False\n self._redundant_hue = redundant\n if 'hue' in self.variables and palette is None and (color is not None):\n if not isinstance(color, str):\n color = mpl.colors.to_hex(color)\n palette = f'dark:{color}'\n msg = f\"\\n\\nSetting a gradient palette using color= is deprecated and will be removed in v0.14.0. Set `palette='{palette}'` for the same effect.\\n\"\n warnings.warn(msg, FutureWarning, stacklevel=3)\n return (palette, hue_order)", + "docstring": "Implement backwards compatibility for hue parametrization. Note: the force_hue parameter is used so that functions can be shown to pass existing tests during refactoring and then tested for new behavior. 
It can be removed after completion of the work.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:_hue_backcompat arg:self arg:color arg:palette arg:hue_order arg:force_hue arguments arg arg arg arg arg Assign BoolOp Compare Compare If BoolOp Compare Assign Assign Assign Assign Assign If Call Assign Call Call If Compare Assign Call Compare Assign Assign If BoolOp Compare Compare Compare If Call Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, num_buckets: int, lower: float, upper: float):\n if num_buckets < 2:\n raise ValueError(f'num_buckets is {num_buckets}, must be at least 2 for simulated quantization.')\n self.num_buckets = num_buckets\n self.lower = lower\n self.upper = upper", + "docstring": "Simulated quantizaiton configuration. Args: num_buckets: The number of quantization buckets, must be atleast 2. lower: The lower bound for the quantization range. upper: The upper bound for the quantization range. Returns: . Raises: ValueError: if is less than 2.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:num_buckets arg:lower arg:upper arguments arg arg arg arg If Compare Raise Call Assign Assign Assign" + }, + { + "library": "pygame", + "name": "move_to_front", + "source_code": "def move_to_front(self, sprite):\n self.change_layer(sprite, self.get_top_layer())", + "docstring": "bring the sprite to front layer LayeredUpdates.move_to_front(sprite): return None Brings the sprite to front by changing the sprite layer to the top-most layer. The sprite is added at the end of the list of sprites in that top-most layer.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:move_to_front arg:self arg:sprite arguments arg arg Call Call" + }, + { + "library": "matplotlib", + "name": "get_text_width_height_descent", + "source_code": "def get_text_width_height_descent(self, s, prop, ismath):\n fontsize = prop.get_size_in_points()\n if ismath == 'TeX':\n return self.get_texmanager().get_text_width_height_descent(s, fontsize, renderer=self)\n dpi = self.points_to_pixels(72)\n if ismath:\n dims = self._text2path.mathtext_parser.parse(s, dpi, prop)\n return dims[0:3]\n flags = self._text2path._get_hinting_flag()\n font = self._text2path._get_font(prop)\n font.set_size(fontsize, dpi)\n font.set_text(s, 0.0, flags=flags)\n w, h = font.get_width_height()\n d = font.get_descent()\n w /= 64.0\n h /= 64.0\n d /= 64.0\n return (w, h, d)", + "docstring": "Get the width, height, and descent (offset from the bottom to the baseline), in display coords, of the string *s* with *prop*. 
Whitespace at the start and the end of *s* is included in the reported width.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:get_text_width_height_descent arg:self arg:s arg:prop arg:ismath arguments arg arg arg arg Assign Call If Compare Return return:yes Call Call Assign Call If Assign Call Return return:yes Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "apply_non_transform_keypoint", + "source_code": "def apply_non_transform_keypoint(self, input: Keypoints, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Keypoints:\n return input", + "docstring": "Process keypoints corresponding to the inputs that are no transformation applied.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py", + "ast_data": "FunctionDef name:apply_non_transform_keypoint arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "DeviceWrapper", + "source_code": "@deprecated(None, 'Please use tf.keras.layers.RNN instead.')\n@tf_export('nn.RNNCellDeviceWrapper', v1=[])\nclass DeviceWrapper(rnn_cell_wrapper_impl.DeviceWrapperBase, _RNNCellWrapperV2):\n\n def __init__(self, *args, **kwargs):\n super(DeviceWrapper, self).__init__(*args, **kwargs)\n __init__.__doc__ = rnn_cell_wrapper_impl.DeviceWrapperBase.__init__.__doc__", + "docstring": "Operator that ensures an RNNCell runs on a particular device.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\rnn_cell_wrapper_v2.py", + "ast_data": "ClassDef name:DeviceWrapper FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "_as_tensor_fullprec", + "source_code": "def _as_tensor_fullprec(t):\n ty = type(t)\n if ty is builtins.float:\n return torch.as_tensor(t, dtype=torch.float64)\n elif ty is builtins.int:\n return torch.as_tensor(t, dtype=torch.int64)\n else:\n return torch.as_tensor(t)", + "docstring": "Like torch.as_tensor, but when given Python data types it will keep them in full precision. Used for calling convention for Dynamo.", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:_as_tensor_fullprec arg:t arguments arg Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_validate_quantized_input_stats", + "source_code": "def _validate_quantized_input_stats(self, converter_kwargs, quant_mode):\n quantized_types = frozenset({_dtypes.int8, _dtypes.uint8})\n requires_quantized_input_stats = (converter_kwargs['inference_type'] in quantized_types or converter_kwargs['inference_input_type'] in quantized_types) and (not quant_mode.is_post_training_integer_quantization())\n if requires_quantized_input_stats and (not converter_kwargs['quantized_input_stats']):\n raise ValueError('The `quantized_input_stats` flag must be defined when either `inference_type` flag or `inference_input_type` flag is set to tf.int8 or tf.uint8. 
Currently, `inference_type={}` and `inference_input_type={}`.'.format(_get_tf_type_name(converter_kwargs['inference_type']), _get_tf_type_name(converter_kwargs['inference_input_type'])))", + "docstring": "Ensure the flag is provided if required.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:_validate_quantized_input_stats arg:self arg:converter_kwargs arg:quant_mode arguments arg arg arg Assign Call Assign BoolOp BoolOp Compare Compare Call If BoolOp Raise Call Call Call Call" + }, + { + "library": "scipy", + "name": "__new__", + "source_code": "def __new__(cls, *system, **kwargs):\n if cls is LinearTimeInvariant:\n raise NotImplementedError('The LinearTimeInvariant class is not meant to be used directly, use `lti` or `dlti` instead.')\n return super().__new__(cls)", + "docstring": "Create a new object, don't allow direct instances.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:__new__ arg:cls arguments arg arg arg If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "render_rect_filled", + "source_code": "def render_rect_filled(self, output: Output, x1: float, y1: float, x2: float, y2: float) -> None:\n output.rects.append((x1, y1, x2, y2))", + "docstring": "Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", + "ast_data": "FunctionDef name:render_rect_filled arg:self arg:output arg:x1 arg:y1 arg:x2 arg:y2 arguments arg arg arg arg arg arg Call" + }, + { + "library": "pytorch", + "name": "code", + "source_code": "@property\ndef code(self):\n return self.forward.code", + "docstring": "Return a pretty-printed representation (as valid Python syntax) of the internal graph for the `inspecting-code` for details.", + "type": "method", + "file_path": "pytorch\\torch\\jit\\_script.py", + "ast_data": "FunctionDef name:code arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "prim_tolist", + "source_code": "@_onnx_symbolic('prim::tolist')\ndef prim_tolist(g: jit_utils.GraphContext, input, dim_val, elem_ty_val):\n dim = symbolic_helper._maybe_get_const(dim_val, 'i')\n if dim > 1:\n return symbolic_helper._unimplemented('prim::tolist', 'dim_val > 1', input)\n return input", + "docstring": "tolist is currently supported only for 1D input tensors. dim_val and elem_ty_val represent dimension and type annotations that need to match dimension and type of the input tensor.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py", + "ast_data": "FunctionDef name:prim_tolist arg:g arg:input arg:dim_val arg:elem_ty_val arguments arg arg arg arg Assign Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "degree", + "source_code": "@property\ndef degree(self):\n return self._N - 1", + "docstring": "Degree of the polynomial. 
One less the number of control points.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\bezier.py", + "ast_data": "FunctionDef name:degree arg:self arguments arg Return return:yes" + }, + { + "library": "seaborn", + "name": "_annotate_heatmap", + "source_code": "def _annotate_heatmap(self, ax, mesh):\n mesh.update_scalarmappable()\n height, width = self.annot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + 0.5, np.arange(height) + 0.5)\n for x, y, m, color, val in zip(xpos.flat, ypos.flat, mesh.get_array().flat, mesh.get_facecolors(), self.annot_data.flat):\n if m is not np.ma.masked:\n lum = relative_luminance(color)\n text_color = '.15' if lum > 0.408 else 'w'\n annotation = ('{:' + self.fmt + '}').format(val)\n text_kwargs = dict(color=text_color, ha='center', va='center')\n text_kwargs.update(self.annot_kws)\n ax.text(x, y, annotation, **text_kwargs)", + "docstring": "Add textual labels with the value in each cell.", + "type": "method", + "file_path": "seaborn\\seaborn\\matrix.py", + "ast_data": "FunctionDef name:_annotate_heatmap arg:self arg:ax arg:mesh arguments arg arg arg Call Assign Assign Call Call Call For Call Call Call If Compare Assign Call Assign Compare Assign Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "set_object_type", + "source_code": "def set_object_type(self, object_type: Union[Callable, str], qconfig_list: list[QConfigAny]) -> QConfigMultiMapping:\n self._insert_qconfig_list('object_type_qconfigs', [object_type], qconfig_list)\n return self", + "docstring": "Set object type QConfigs see :func: for more info", + "type": "method", + "file_path": "pytorch\\torch\\ao\\ns\\fx\\qconfig_multi_mapping.py", + "ast_data": "FunctionDef name:set_object_type arg:self arg:object_type arg:qconfig_list arguments arg arg arg Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "embedding_lookup", + "source_code": "@tf_export(v1=['nn.embedding_lookup'])\n@dispatch.add_dispatch_support\ndef embedding_lookup(params, ids, partition_strategy='mod', name=None, validate_indices=True, max_norm=None):\n \"\"\"\n **Behavior Difference between CPU and GPU**\n\n Please note that when using `tf.nn.embedding_lookup` on a GPU, if an out-of-bound\n index is encountered, a value of 0 will be stored in the corresponding output value.\n On the other hand, when using `tf.nn.embedding_lookup` on a CPU, an error will be\n returned if an out-of-bound index is found.\n\n This behavior difference can impact the results of your computation, especially when\n dealing with indices that may go beyond the bounds of the tensor.\n Make sure to be mindful of this distinction when using the `tf.nn.embedding_lookup`\n function in your computations.\n\n **Usage Example**\n\n Here's an example demonstrating how to use `tf.nn.embedding_lookup`:\n\n ```python\n import tensorflow as tf\n\n\n embedding_matrix = tf.constant([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])\n indices = tf.constant([1, 0, 2])\n\n\n embeddings = tf.nn.embedding_lookup(embedding_matrix, indices)\n\n\n print(\"Embeddings:\")\n print(embeddings.numpy())\n ```\n \"\"\"\n return _embedding_lookup_and_transform(params=params, ids=ids, partition_strategy=partition_strategy, name=name, max_norm=max_norm, transform_fn=None)", + "docstring": "Looks up embeddings for the given from a list of tensors. This function is used to perform parallel lookups on the list of tensors in . It is a generalization of , where is interpreted as a partitioning of a large embedding tensor. 
may be a as returned by using with a partitioner. If , each element of is partitioned between the elements of according to the . In all strategies, if the id space does not evenly divide the number of partitions, each of the first partitions will be assigned one more id. If is , we assign each id to partition . For instance, 13 ids are split across 5 partitions as: If is , we assign ids to partitions in a contiguous manner. In this case, 13 ids are split across 5 partitions as: If the input ids are ragged tensors, partition variables are not supported and the partition strategy and the max_norm are ignored. The results of the lookup are concatenated into a dense tensor. The returned tensor has shape . Args: params: A single tensor representing the complete embedding tensor, or a list of P tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a , created by partitioning along dimension 0. Each element must be appropriately sized for the given . ids: A or a 'RaggedTensor' with type or containing the ids to be looked up in . Caution: Out-of-bounds indices will result in undefined behavior, which will differ between devices and backends. partition_strategy: A string specifying the partitioning strategy, relevant if . Currently and are supported. Default is . name: A name for the operation (optional). validate_indices: DEPRECATED. If this operation is assigned to CPU, values in are always validated to be within range. If assigned to GPU, out-of-bound indices result in safe but unspecified behavior, which may include raising an error. max_norm: If not , each embedding is clipped if its l2-norm is larger than this value. Returns: A or a 'RaggedTensor', depending on the input, with the same type as the tensors in . Raises: ValueError: If is empty.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\embedding_ops.py", + "ast_data": "FunctionDef name:embedding_lookup arg:params arg:ids arg:partition_strategy arg:name arg:validate_indices arg:max_norm arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "_split", + "source_code": "@array_function_dispatch(_split_dispatcher)\ndef _split(a, sep=None, maxsplit=None):\n return _vec_string(a, np.object_, 'split', [sep] + _clean_args(maxsplit))", + "docstring": "For each element in , return a list of the words in the string, using as the delimiter string. Calls :meth: element-wise. Parameters ---------- a : array-like, with `sepmaxsplitmaxsplit` splits are done. 
Returns ------- out : ndarray Array of list objects Examples -------- >>> import numpy as np >>> x = np.array(\"Numpy is nice!\") >>> np.strings.split(x, \" \") # doctest: +SKIP array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP >>> np.strings.split(x, \" \", 1) # doctest: +SKIP array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP See Also -------- str.split, rsplit", + "type": "function", + "file_path": "numpy\\numpy\\_core\\strings.py", + "ast_data": "FunctionDef name:_split arg:a arg:sep arg:maxsplit arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "encode_exception_table_varint", + "source_code": "def encode_exception_table_varint(n: int) -> list[int]:\n assert n >= 0\n b = [n & 63]\n n >>= 6\n while n > 0:\n b.append(n & 63)\n n >>= 6\n b.reverse()\n for i in range(len(b) - 1):\n b[i] |= 64\n return b", + "docstring": "Similar to , but the 6-bit chunks are ordered in reverse.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:encode_exception_table_varint arg:n arguments arg Compare Assign While Compare Call Call For Call Call Return return:yes" + }, + { + "library": "scrapy", + "name": "get_spider_middleware", + "source_code": "def get_spider_middleware(self, cls: type[_T]) -> _T | None:\n if not self.engine:\n raise RuntimeError('Crawler.get_spider_middleware() can only be called after the crawl engine has been created.')\n return self._get_component(cls, self.engine.scraper.spidermw.middlewares)", + "docstring": "Return the run-time instance of a :ref: of the specified class or a subclass, or `engine_startedspider_opened`.", + "type": "method", + "file_path": "scrapy\\scrapy\\crawler.py", + "ast_data": "FunctionDef name:get_spider_middleware arg:self arg:cls arguments arg arg If Raise Call Return return:yes Call" + }, + { + "library": "seaborn", + "name": "get_mapping", + "source_code": "def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n boolean_scale = isinstance(scale, Boolean)\n order = getattr(scale, 'order', [True, False] if boolean_scale else None)\n levels = categorical_order(data, order)\n values = self._get_values(scale, levels)\n if boolean_scale:\n values = values[::-1]\n\n def mapping(x):\n ixs = np.asarray(np.nan_to_num(x), np.intp)\n return [values[ix] if np.isfinite(x_i) else False for x_i, ix in zip(x, ixs)]\n return mapping", + "docstring": "Return a function that maps each data value to True or False.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\properties.py", + "ast_data": "FunctionDef name:get_mapping arg:self arg:scale arg:data arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Assign FunctionDef name:mapping arg:x arguments arg Assign Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "is_valid_positional_slice", + "source_code": "def is_valid_positional_slice(slc: slice) -> bool:\n return lib.is_int_or_none(slc.start) and lib.is_int_or_none(slc.stop) and lib.is_int_or_none(slc.step)", + "docstring": "Check if a slice object can be interpreted as a positional indexer. 
Parameters ---------- slc : slice Returns ------- bool Notes ----- A valid positional slice may also be interpreted as a label-based slice depending on the index being sliced.", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexers\\utils.py", + "ast_data": "FunctionDef name:is_valid_positional_slice arg:slc arguments arg Return return:yes BoolOp Call Call Call" + }, + { + "library": "matplotlib", + "name": "unit_regular_star", + "source_code": "@classmethod\ndef unit_regular_star(cls, numVertices, innerCircle=0.5):\n if numVertices <= 16:\n path = cls._unit_regular_stars.get((numVertices, innerCircle))\n else:\n path = None\n if path is None:\n ns2 = numVertices * 2\n theta = 2 * np.pi / ns2 * np.arange(ns2 + 1)\n theta += np.pi / 2.0\n r = np.ones(ns2 + 1)\n r[1::2] = innerCircle\n verts = (r * np.vstack((np.cos(theta), np.sin(theta)))).T\n path = cls(verts, closed=True, readonly=True)\n if numVertices <= 16:\n cls._unit_regular_stars[numVertices, innerCircle] = path\n return path", + "docstring": "Return a :class: for a unit regular star with the given numVertices and radius of 1.0, centered at (0, 0).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\path.py", + "ast_data": "FunctionDef name:unit_regular_star arg:cls arg:numVertices arg:innerCircle arguments arg arg arg If Compare Assign Call Assign If Compare Assign Assign Call Assign Call Assign Assign Call Call Call Assign Call If Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "remove_override", + "source_code": "def remove_override(self, key: _K) -> None:\n self._overrides.pop(key, None)\n self._merged.pop(key, None)\n if key in self._base:\n self._merged[key] = self._base[key]", + "docstring": "Un-overrides a key-value pair.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py", + "ast_data": "FunctionDef name:remove_override arg:self arg:key arguments arg arg Call Call If Compare Assign" + }, + { + "library": "pandas", + "name": "_default_formatter", + "source_code": "def _default_formatter(x: Any, precision: int, thousands: bool=False) -> Any:\n if is_float(x) or is_complex(x):\n return f'{x:,.{precision}f}' if thousands else f'{x:.{precision}f}'\n elif is_integer(x):\n return f'{x:,}' if thousands else str(x)\n return x", + "docstring": "Format the display of a value Parameters ---------- x : Any Input variable to be formatted precision : Int Floating point precision used if `` is float or complex. thousands : bool, default False Whether to group digits with thousands separated with \",\". 
Returns ------- value : Any Matches input type, or string if input is float or complex or int with sep.", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_default_formatter arg:x arg:precision arg:thousands arguments arg arg arg If BoolOp Call Call Return return:yes If Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "operator", + "source_code": "@property\ndef operator(self) -> 'LinearOperatorInversion':\n return self._operator", + "docstring": "The operator before inversion.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_inversion.py", + "ast_data": "FunctionDef name:operator arg:self arguments arg Return return:yes" + }, + { + "library": "scrapy", + "name": "StreamLogger", + "source_code": "class StreamLogger:\n\n def __init__(self, logger: logging.Logger, log_level: int=logging.INFO):\n self.logger: logging.Logger = logger\n self.log_level: int = log_level\n self.linebuf: str = ''\n\n def write(self, buf: str) -> None:\n for line in buf.rstrip().splitlines():\n self.logger.log(self.log_level, line.rstrip())\n\n def flush(self) -> None:\n for h in self.logger.handlers:\n h.flush()", + "docstring": "Fake file-like stream object that redirects writes to a logger instance Taken from:", + "type": "class", + "file_path": "scrapy\\scrapy\\utils\\log.py", + "ast_data": "ClassDef name:StreamLogger FunctionDef name:__init__ arg:self arg:logger arg:log_level arguments arg arg arg FunctionDef name:write arg:self arg:buf arguments arg arg For Call Call Call Call FunctionDef name:flush arg:self arguments arg For Call" + }, + { + "library": "tensorflow", + "name": "convert_to_tensor_v1", + "source_code": "def convert_to_tensor_v1(value, dtype=None, name=None, preferred_dtype=None, dtype_hint=None) -> tensor_lib.Tensor:\n preferred_dtype = deprecation.deprecated_argument_lookup('dtype_hint', dtype_hint, 'preferred_dtype', preferred_dtype)\n return convert_to_tensor_v2(value, dtype, preferred_dtype, name)", + "docstring": "Converts the given to a (with the TF1 API).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_conversion.py", + "ast_data": "FunctionDef name:convert_to_tensor_v1 arg:value arg:dtype arg:name arg:preferred_dtype arg:dtype_hint arguments arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "dismantle_func_graph", + "source_code": "def dismantle_func_graph(func_graph):\n func_graph._function_captures.clear()\n ops.dismantle_graph(func_graph)", + "docstring": "Removes reference cycles in FuncGraph. Helpful for making sure the garbage collector doesn't need to run when the FuncGraph goes out of scope, e.g. in tests using defun with @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True). Args: func_graph: A object to destroy. 
is unusable after this function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", + "ast_data": "FunctionDef name:dismantle_func_graph arg:func_graph arguments arg Call Call" + }, + { + "library": "scipy", + "name": "initialize", + "source_code": "def initialize(self, n, approx_type):\n self.first_iteration = True\n self.n = n\n self.approx_type = approx_type\n if approx_type not in ('hess', 'inv_hess'):\n raise ValueError(\"`approx_type` must be 'hess' or 'inv_hess'.\")\n if self.approx_type == 'hess':\n self.B = np.eye(n, dtype=float)\n else:\n self.H = np.eye(n, dtype=float)", + "docstring": "Initialize internal matrix. Allocate internal memory for storing and updating the Hessian or its inverse. Parameters ---------- n : int Problem dimension. approx_type : {'hess', 'inv_hess'} Selects either the Hessian or the inverse Hessian. When set to 'hess' the Hessian will be stored and updated. When set to 'inv_hess' its inverse will be used instead.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py", + "ast_data": "FunctionDef name:initialize arg:self arg:n arg:approx_type arguments arg arg arg Assign Assign Assign If Compare Raise Call If Compare Assign Call Assign Call" + }, + { + "library": "pandas", + "name": "transpose", + "source_code": "def transpose(self, *axes: int) -> Self:\n return self[:]", + "docstring": "Return a transposed view on this array. Because ExtensionArrays are always 1D, this is a no-op. It is included for compatibility with np.ndarray. Returns ------- ExtensionArray Examples -------- >>> pd.array([1, 2, 3]).transpose() [1, 2, 3] Length: 3, dtype: Int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:transpose arg:self arguments arg arg Return return:yes" + }, + { + "library": "scipy", + "name": "Problem22", + "source_code": "class Problem22(Benchmark):\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n self._bounds = [(0, 20)]\n self.global_optimum = 9.0 * pi / 2.0\n self.fglob = exp(-27.0 * pi / 2.0) - 1.0\n\n def fun(self, x, *args):\n self.nfev += 1\n x = x[0]\n return exp(-3.0 * x) - sin(x) ** 3.0", + "docstring": "Univariate Problem22 objective function. This class defines the Univariate Problem22 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem22}}(x) = e^{-3x} - \\sin^3(x) Bound constraints: :math: .. 
figure:: figures/Problem22.png :alt: Univariate Problem22 function :align: center **Univariate Problem22 function** *Global optimum*: :math: for :math:", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", + "ast_data": "ClassDef name:Problem22 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "add_dtypes_line", + "source_code": "def add_dtypes_line(self) -> None:\n collected_dtypes = [f'{key}({val:d})' for key, val in sorted(self.dtype_counts.items())]\n self._lines.append(f'dtypes: {', '.join(collected_dtypes)}')", + "docstring": "Add summary line with dtypes present in dataframe.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:add_dtypes_line arg:self arguments arg Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_num_elements", + "source_code": "def _num_elements(losses):\n with ops.name_scope(None, 'num_elements', values=[losses]) as scope:\n return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype)", + "docstring": "Computes the number of elements in tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py", + "ast_data": "FunctionDef name:_num_elements arg:losses arguments arg With Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_is_valid_shortcut", + "source_code": "def _is_valid_shortcut(self, key):\n return 'cmd+' not in key and (not key.startswith('MouseButton.'))", + "docstring": "Check for a valid shortcut to be displayed. - GTK will never send 'cmd+' (see ). 
- The shortcut window only shows keyboard shortcuts, not mouse buttons.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_gtk4.py", + "ast_data": "FunctionDef name:_is_valid_shortcut arg:self arg:key arguments arg arg Return return:yes BoolOp Compare Call" + }, + { + "library": "matplotlib", + "name": "get_text_width_height_descent", + "source_code": "@classmethod\ndef get_text_width_height_descent(cls, tex, fontsize, renderer=None):\n if tex.strip() == '':\n return (0, 0, 0)\n dvifile = cls.make_dvi(tex, fontsize)\n dpi_fraction = renderer.points_to_pixels(1.0) if renderer else 1\n with dviread.Dvi(dvifile, 72 * dpi_fraction) as dvi:\n page, = dvi\n return (page.width, page.height + page.descent, page.descent)", + "docstring": "Return width, height and descent of the text.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py", + "ast_data": "FunctionDef name:get_text_width_height_descent arg:cls arg:tex arg:fontsize arg:renderer arguments arg arg arg arg If Compare Call Return return:yes Assign Call Assign Call With Call Assign Return return:yes" + }, + { + "library": "cryptography", + "name": "__eq__", + "source_code": "@abc.abstractmethod\ndef __eq__(self, other: object) -> bool:\n pass", + "docstring": "Checks equality.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py", + "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg" + }, + { + "library": "scikit-learn", + "name": "_ensure_single_chunk", + "source_code": "def _ensure_single_chunk(x: Array, axis: int) -> tuple[Array, Callable[[Array], Array]]:\n if axis < 0:\n axis += x.ndim\n if x.numblocks[axis] < 2:\n return (x, lambda x: x)\n x = x.rechunk({i: -1 if i == axis else 'auto' for i in range(x.ndim)})\n return (x, lambda x: x.rechunk())", + "docstring": "Make sure that Array is not broken into multiple chunks along axis. Returns ------- x : Array The input Array with a single chunk along axis. 
restore : Callable[Array, Array] function to apply to the output to rechunk it back into reasonable chunks", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py", + "ast_data": "FunctionDef name:_ensure_single_chunk arg:x arg:axis arguments arg arg If Compare If Compare Return return:yes arguments arg Assign Call Compare Call Return return:yes arguments arg Call" + }, + { + "library": "kornia", + "name": "_modified_bessel_0", + "source_code": "def _modified_bessel_0(x: Tensor) -> Tensor:\n ax = torch.abs(x)\n out = zeros_like(x)\n idx_a = ax < 3.75\n if idx_a.any():\n y = x[idx_a] / 3.75 * (x[idx_a] / 3.75)\n out[idx_a] = 1.0 + y * (3.5156229 + y * (3.0899424 + y * (1.2067492 + y * (0.2659732 + y * (0.0360768 + y * 0.0045813)))))\n idx_b = ~idx_a\n if idx_b.any():\n y = 3.75 / ax[idx_b]\n ans = 0.00916281 + y * (-0.02057706 + y * (0.02635537 + y * (-0.01647633 + y * 0.00392377)))\n coef = 0.39894228 + y * (0.01328592 + y * (0.00225319 + y * (-0.00157565 + y * ans)))\n out[idx_b] = ax[idx_b].exp() / ax[idx_b].sqrt() * coef\n return out", + "docstring": "Adapted from:", + "type": "function", + "file_path": "kornia\\kornia\\filters\\kernels.py", + "ast_data": "FunctionDef name:_modified_bessel_0 arg:x arguments arg Assign Call Assign Call Assign Compare If Call Assign Assign Assign If Call Assign Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "get_allow_future", + "source_code": "def get_allow_future(self):\n return self.allow_future", + "docstring": "Return if the view should be allowed to display objects from the future.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:get_allow_future arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "multilabel_soft_margin_loss", + "source_code": "def multilabel_soft_margin_loss(input: Tensor, target: Tensor, weight: Optional[Tensor]=None, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n if has_torch_function_variadic(input, target, weight):\n return handle_torch_function(multilabel_soft_margin_loss, (input, target, weight), input, target, weight=weight, size_average=size_average, reduce=reduce, reduction=reduction)\n if size_average is not None or reduce is not None:\n reduction = _Reduction.legacy_get_string(size_average, reduce)\n loss = -(target * logsigmoid(input) + (1 - target) * logsigmoid(-input))\n if weight is not None:\n loss = loss * weight\n class_dim = input.dim() - 1\n C = input.size(class_dim)\n loss = loss.sum(dim=class_dim) / C\n if reduction == 'none':\n ret = loss\n elif reduction == 'mean':\n ret = loss.mean()\n elif reduction == 'sum':\n ret = loss.sum()\n else:\n ret = input\n raise ValueError(reduction + ' is not valid')\n return ret", + "docstring": "Compute the multilabel soft margin loss. See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. 
Returns: Tensor: Mutilabel soft margin loss.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:multilabel_soft_margin_loss arg:input arg:target arg:weight arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call Call If Compare Assign Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Call If Compare Assign Call Assign Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "roots_legendre", + "source_code": "def roots_legendre(n, mu=False):\n m = int(n)\n if n < 1 or n != m:\n raise ValueError('n must be a positive integer.')\n mu0 = 2.0\n\n def an_func(k):\n return 0.0 * k\n\n def bn_func(k):\n return k * np.sqrt(1.0 / (4 * k * k - 1))\n f = _ufuncs.eval_legendre\n\n def df(n, x):\n return (-n * x * _ufuncs.eval_legendre(n, x) + n * _ufuncs.eval_legendre(n - 1, x)) / (1 - x ** 2)\n return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)", + "docstring": "Gauss-Legendre quadrature. Compute the sample points and weights for Gauss-Legendre quadrature [GL]_. The sample points are the roots of the nth degree Legendre polynomial :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 2.2.10 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad numpy.polynomial.legendre.leggauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. .. [GL] Gauss-Legendre quadrature, Wikipedia, Examples -------- >>> import numpy as np >>> from scipy.special import roots_legendre, eval_legendre >>> roots, weights = roots_legendre(9) `roots_legendre`. Map the roots computed above from [-1, 1] to [a, b]. >>> t = (b - a)/2 * roots + (a + b)/2 Approximate the integral as the weighted sum of the function values. 
>>> (b - a)/2 * f(t).dot(weights) 2.1931471805599276 Compare that to the exact result, which is 3/2 + log(2): >>> 1.5 + np.log(2) 2.1931471805599454", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:roots_legendre arg:n arg:mu arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Assign FunctionDef name:an_func arg:k arguments arg Return return:yes FunctionDef name:bn_func arg:k arguments arg Return return:yes Call Assign FunctionDef name:df arg:n arg:x arguments arg arg Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "check", + "source_code": "def check(self, keys) -> bool:\n b64_keys = [self.prefix + self._encode(key) for key in keys]\n kvs = self._try_wait_get(b64_keys, override_timeout=datetime.timedelta(microseconds=1))\n return kvs is not None", + "docstring": "Check if all of the keys are immediately present (without waiting).", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_store.py", + "ast_data": "FunctionDef name:check arg:self arg:keys arguments arg arg Assign Call Assign Call Call Return return:yes Compare" + }, + { + "library": "django", + "name": "get_host", + "source_code": "def get_host(self):\n host = self._get_raw_host()\n allowed_hosts = settings.ALLOWED_HOSTS\n if settings.DEBUG and (not allowed_hosts):\n allowed_hosts = ['.localhost', '127.0.0.1', '[::1]']\n domain, port = split_domain_port(host)\n if domain and validate_host(domain, allowed_hosts):\n return host\n else:\n msg = 'Invalid HTTP_HOST header: %r.' % host\n if domain:\n msg += ' You may need to add %r to ALLOWED_HOSTS.' % domain\n else:\n msg += ' The domain name provided is not valid according to RFC 1034/1035.'\n raise DisallowedHost(msg)", + "docstring": "Return the HTTP host using the environment or request headers.", + "type": "method", + "file_path": "django\\django\\http\\request.py", + "ast_data": "FunctionDef name:get_host arg:self arguments arg Assign Call Assign If BoolOp Assign Assign Call If BoolOp Call Return return:yes Assign If Raise Call" + }, + { + "library": "django", + "name": "add_arguments", + "source_code": "def add_arguments(self, parser):\n pass", + "docstring": "Entry point for subclassed commands to add custom arguments.", + "type": "method", + "file_path": "django\\django\\core\\management\\base.py", + "ast_data": "FunctionDef name:add_arguments arg:self arg:parser arguments arg arg" + }, + { + "library": "django", + "name": "LinearGeometryMixin", + "source_code": "class LinearGeometryMixin:\n\n def interpolate(self, distance):\n return self._topology(capi.geos_interpolate(self.ptr, distance))\n\n def interpolate_normalized(self, distance):\n return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))\n\n def project(self, point):\n from .point import Point\n if not isinstance(point, Point):\n raise TypeError('locate_point argument must be a Point')\n return capi.geos_project(self.ptr, point.ptr)\n\n def project_normalized(self, point):\n from .point import Point\n if not isinstance(point, Point):\n raise TypeError('locate_point argument must be a Point')\n return capi.geos_project_normalized(self.ptr, point.ptr)\n\n @property\n def merged(self):\n return self._topology(capi.geos_linemerge(self.ptr))\n\n @property\n def closed(self):\n return capi.geos_isclosed(self.ptr)", + "docstring": "Used for LineString and MultiLineString.", + "type": "class", + "file_path": 
"django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "ClassDef name:LinearGeometryMixin FunctionDef name:interpolate arg:self arg:distance arguments arg arg Return return:yes Call Call FunctionDef name:interpolate_normalized arg:self arg:distance arguments arg arg Return return:yes Call Call FunctionDef name:project arg:self arg:point arguments arg arg If Call Raise Call Return return:yes Call FunctionDef name:project_normalized arg:self arg:point arguments arg arg If Call Raise Call Return return:yes Call FunctionDef name:merged arg:self arguments arg Return return:yes Call Call FunctionDef name:closed arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "DistributedDatasetsFromFunctionSpec", + "source_code": "class DistributedDatasetsFromFunctionSpec(DistributedDatasetAndIteratorSpec):\n\n @property\n def value_type(self):\n return DistributedDatasetsFromFunction\n\n @property\n def _component_specs(self):\n specs = []\n worker_device_pairs = self._input_workers._worker_device_pairs\n for i, _ in enumerate(worker_device_pairs):\n element_spec = nest.map_structure(functools.partial(_replace_per_replica_spec, i=i), self._element_spec)\n specs.append(dataset_ops.DatasetSpec(element_spec))\n return specs\n\n def _to_components(self, value):\n return value._datasets\n\n def _from_components(self, components):\n return DistributedDatasetsFromFunction(input_workers=self._input_workers, strategy=self._strategy, components=components, element_spec=self._element_spec, options=self._options)\n\n @staticmethod\n def from_value(value):\n return DistributedDatasetsFromFunctionSpec(input_workers=value._input_workers, element_spec=value._element_spec, strategy=value._strategy, options=value._options)", + "docstring": "Type specification for `DistributedDatasetsFromFunction.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "ClassDef name:DistributedDatasetsFromFunctionSpec FunctionDef name:value_type arg:self arguments arg Return return:yes FunctionDef name:_component_specs arg:self arguments arg Assign Assign For Call Assign Call Call Call Call Return return:yes FunctionDef name:_to_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:_from_components arg:self arg:components arguments arg arg Return return:yes Call FunctionDef name:from_value arg:value arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "get_rel_pos", + "source_code": "def get_rel_pos(q_size: int, k_size: int, rel_pos: Tensor) -> Tensor:\n max_rel_dist = int(2 * max(q_size, k_size) - 1)\n if rel_pos.shape[0] != max_rel_dist:\n rel_pos_resized = F.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear')\n rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)\n else:\n rel_pos_resized = rel_pos\n q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)\n k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)\n relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)\n return rel_pos_resized[relative_coords.long()]", + "docstring": "Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size: size of query q. k_size: size of key k. rel_pos: relative position embeddings (L, C). 
Returns: Extracted positional embeddings according to relative positions.", + "type": "function", + "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\image_encoder.py", + "ast_data": "FunctionDef name:get_rel_pos arg:q_size arg:k_size arg:rel_pos arguments arg arg arg Assign Call Call If Compare Assign Call Call Call Assign Call Call Assign Assign Call Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "add_internal", + "source_code": "def add_internal(self, sprite, layer=None):\n if not hasattr(sprite, 'dirty'):\n raise AttributeError()\n if not hasattr(sprite, 'visible'):\n raise AttributeError()\n if not hasattr(sprite, 'blendmode'):\n raise AttributeError()\n if not isinstance(sprite, DirtySprite):\n raise TypeError()\n if sprite.dirty == 0:\n sprite.dirty = 1\n LayeredUpdates.add_internal(self, sprite, layer)", + "docstring": "Do not use this method directly. It is used by the group to add a sprite internally.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:add_internal arg:self arg:sprite arg:layer arguments arg arg arg If Call Raise Call If Call Raise Call If Call Raise Call If Call Raise Call If Compare Assign Call" + }, + { + "library": "scipy", + "name": "_compute_outer_prob_inside_method", + "source_code": "def _compute_outer_prob_inside_method(m, n, g, h):\n if m < n:\n m, n = (n, m)\n mg = m // g\n ng = n // g\n minj, maxj = (0, min(int(np.ceil(h / mg)), n + 1))\n curlen = maxj - minj\n lenA = min(2 * maxj + 2, n + 1)\n dtype = np.float64\n A = np.ones(lenA, dtype=dtype)\n A[minj:maxj] = 0.0\n for i in range(1, m + 1):\n lastminj, lastlen = (minj, curlen)\n minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)\n minj = min(minj, n)\n maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)\n if maxj <= minj:\n return 1.0\n val = 0.0 if minj == 0 else 1.0\n for jj in range(maxj - minj):\n j = jj + minj\n val = (A[jj + minj - lastminj] * i + val * j) / (i + j)\n A[jj] = val\n curlen = maxj - minj\n if lastlen > curlen:\n A[maxj - minj:maxj - minj + (lastlen - curlen)] = 1\n return A[maxj - minj - 1]", + "docstring": "Count the proportion of paths that do not stay strictly inside two diagonal lines. Parameters ---------- m : integer m > 0 n : integer n > 0 g : integer g is greatest common divisor of m and n h : integer 0 <= h <= lcm(m,n) Returns ------- p : float The proportion of paths that do not stay inside the two lines. The classical algorithm counts the integer lattice paths from (0, 0) to (m, n) which satisfy |x/m - y/n| < h / lcm(m, n). The paths make steps of size +1 in either positive x or positive y directions. We are, however, interested in 1 - proportion to computes p-values, so we change the recursion to compute 1 - p directly while staying within the \"inside method\" a described by Hodges. We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk. Hodges, J.L. Jr., \"The Significance Probability of the Smirnov Two-Sample Test,\" Arkiv fiur Matematik, 3, No. 43 (1958), 469-86. 
For the recursion for 1-p see Viehmann, T.: \"Numerically more stable computation of the p-values for the two-sample Kolmogorov-Smirnov test,\" arXiv: 2102.08037", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_pythran.py", + "ast_data": "FunctionDef name:_compute_outer_prob_inside_method arg:m arg:n arg:g arg:h arguments arg arg arg arg If Compare Assign Assign Assign Assign Call Call Call Assign Assign Call Assign Assign Call Assign For Call Assign Assign Call Call Call Assign Call Assign Call Call Call If Compare Return return:yes Assign Compare For Call Assign Assign Assign Assign If Compare Assign Return return:yes" + }, + { + "library": "kornia", + "name": "RandomDissolving", + "source_code": "class RandomDissolving(IntensityAugmentationBase2D):\n\n def __init__(self, step_range: Tuple[float, float]=(100, 500), version: str='2.1', p: float=0.5, keepdim: bool=False, **kwargs: Any) -> None:\n super().__init__(p=p, same_on_batch=True, keepdim=keepdim)\n self.step_range = step_range\n self._dslv = StableDiffusionDissolving(version, **kwargs)\n self._param_generator = rg.PlainUniformGenerator((self.step_range, 'step_range_factor', None, None))\n\n def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n return self._dslv(input, params['step_range_factor'][0].long().item())", + "docstring": "Perform dissolving transformation using StableDiffusion models. Based on :cite:, the dissolving transformation is essentially applying one-step reverse diffusion. Our implementation currently supports HuggingFace implementations of SD 1.4, 1.5 and 2.1. SD 1.X tends to remove more details than SD2.1. .. list-table:: Title :widths: 32 32 32 :header-rows: 1 * - SD 1.4 - SD 1.5 - SD 2.1 * - figure:: - figure:: - figure:: Args: p: probability of applying the transformation. version: the version of the stable diffusion model. step_range: the step range of the diffusion model steps. Higher the step, stronger the dissolving effects. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). **kwargs: additional arguments for for HF StableDiffusionPipeline. Shape: - Input: :math: or :math:. 
- Output: :math:", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\dissolving.py", + "ast_data": "ClassDef name:RandomDissolving FunctionDef name:__init__ arg:self arg:step_range arg:version arg:p arg:keepdim arguments arg arg arg arg arg arg Call Call Assign Assign Call Assign Call FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "experimental_type_proto", + "source_code": "@classmethod\ndef experimental_type_proto(cls) -> Type[struct_pb2.TensorSpecProto]:\n return struct_pb2.TensorSpecProto", + "docstring": "Returns the type of proto associated with TensorSpec serialization.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "FunctionDef name:experimental_type_proto arg:cls arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "randperm", + "source_code": "def randperm(n: int, ensure_perm: bool=True, **kwargs: Any) -> Tensor:\n perm = torch.randperm(n, **kwargs)\n if ensure_perm:\n while torch.all(torch.eq(perm, torch.arange(n, device=perm.device))):\n perm = torch.randperm(n, **kwargs)\n return perm", + "docstring": "with the ability to ensure the different arrangement generated.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\random_generator\\utils.py", + "ast_data": "FunctionDef name:randperm arg:n arg:ensure_perm arguments arg arg arg Assign Call If While Call Call Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "solarize_add", + "source_code": "def solarize_add(min_mag: float, max_mag: float) -> OperationBase:\n return SolarizeAdd(None, 1.0, magnitude_range=(min_mag, max_mag))", + "docstring": "Return SolarizeAdd op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py", + "ast_data": "FunctionDef name:solarize_add arg:min_mag arg:max_mag arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_infinite_like", + "source_code": "def _infinite_like(tensor):\n return torch.full_like(tensor, inf)", + "docstring": "Helper function for obtaining infinite KL Divergence throughout", + "type": "function", + "file_path": "pytorch\\torch\\distributions\\kl.py", + "ast_data": "FunctionDef name:_infinite_like arg:tensor arguments arg Return return:yes Call" + }, + { + "library": "scrapy", + "name": "stop", + "source_code": "def stop(self) -> Deferred[None]:\n\n @deferred_f_from_coro_f\n async def _finish_stopping_engine(_: Any) -> None:\n await self.signals.send_catch_log_async(signal=signals.engine_stopped)\n self._closewait.callback(None)\n if not self.running:\n raise RuntimeError('Engine not running')\n self.running = False\n dfd = self.close_spider(self.spider, reason='shutdown') if self.spider is not None else succeed(None)\n return dfd.addBoth(_finish_stopping_engine)", + "docstring": "Gracefully stop the execution engine", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\engine.py", + "ast_data": "FunctionDef name:stop arg:self arguments arg AsyncFunctionDef name:_finish_stopping_engine arg:_ arguments arg Call Call If Raise Call Assign Assign Compare Call Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "report", + "source_code": "@cherrypy.expose\ndef report(self, name):\n filename, statements, excluded, missing, _ = self.coverage.analysis2(name)\n pc = _percent(statements, 
missing)\n yield (TEMPLATE_COVERAGE % dict(name=os.path.basename(name), fullpath=name, pc=pc))\n yield '\\n'\n for line in self.annotated_file(filename, statements, excluded, missing):\n yield line\n yield '
'\n yield ''\n yield ''", + "docstring": "Render coverage stats as HTML.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\covercp.py", + "ast_data": "FunctionDef name:report arg:self arg:name arguments arg arg Assign Call Assign Call Call Call For Call" + }, + { + "library": "tensorflow", + "name": "to_dense", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef to_dense(tensor):\n if is_sparse(tensor):\n return sparse_ops.sparse_tensor_to_dense(tensor)\n else:\n return tensor", + "docstring": "Converts a sparse tensor into a dense tensor and returns it. Args: tensor: A tensor instance (potentially sparse). Returns: A dense tensor. Examples: >>> b = tf.keras.backend.placeholder((2, 2), sparse=True) >>> print(tf.keras.backend.is_sparse(b)) True >>> c = tf.keras.backend.to_dense(b) >>> print(tf.keras.backend.is_sparse(c)) False", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:to_dense arg:tensor arguments arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "kornia", + "name": "rot_x", + "source_code": "@classmethod\ndef rot_x(cls, x: Tensor) -> Se3:\n zs = zeros_like(x)\n return cls(So3.rot_x(x), stack((zs, zs, zs), -1))", + "docstring": "Construct a x-axis rotation. Args: x: the x-axis rotation angle.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", + "ast_data": "FunctionDef name:rot_x arg:cls arg:x arguments arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "linprog_terse_callback", + "source_code": "def linprog_terse_callback(res):\n nit = res['nit']\n x = res['x']\n if nit == 0:\n print('Iter: X:')\n print(f'{nit: <5d} ', end='')\n print(x)", + "docstring": "A sample callback function demonstrating the linprog callback interface. This callback produces brief output to sys.stdout before each iteration and after the final iteration of the simplex algorithm. Parameters ---------- res : A consisting of the following fields: x : 1-D array The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. success : bool True if the algorithm succeeded in finding an optimal solution. slack : 1-D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. con : 1-D array The (nominally zero) residuals of the equality constraints, that is, `` : Serious numerical difficulties encountered nit : int The number of iterations performed. 
message : str A string descriptor of the exit status of the optimization.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_linprog.py", + "ast_data": "FunctionDef name:linprog_terse_callback arg:res arguments arg Assign Assign If Compare Call Call Call" + }, + { + "library": "pandas", + "name": "__contains__", + "source_code": "def __contains__(self, key) -> bool:\n if is_valid_na_for_dtype(key, self.categories.dtype):\n return bool(self.isna().any())\n return contains(self, key, container=self._codes)", + "docstring": "Returns True if is in this Categorical.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg If Call Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_iterator_element_layouts", + "source_code": "def set_iterator_element_layouts(self, iterator_resource_dtensor, layouts: List[layout_lib.Layout]):\n _pywrap_dtensor_device.SetIteratorElementLayouts(context.context()._handle, iterator_resource_dtensor, [layout.to_string() for layout in layouts], self._device_info)", + "docstring": "Sets the element layouts on an iterator resource tensor. Args: iterator_resource_dtensor: a DTensor created by packing the individiual iterator resource tensors. layouts: the flattened list of layouts to be applied to the elements emitted by the iterator resource DTensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py", + "ast_data": "FunctionDef name:set_iterator_element_layouts arg:self arg:iterator_resource_dtensor arg:layouts arguments arg arg arg Call Call Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "def fit(self, X, y=None):\n return self", + "docstring": "Do nothing and return the estimator unchanged. This method is just there to implement the usual API and hence work in pipelines. Parameters ---------- X : Ignored Not used, present for API consistency by convention. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes" + }, + { + "library": "pandas", + "name": "asi8", + "source_code": "@property\ndef asi8(self) -> npt.NDArray[np.int64]:\n return self._ndarray.view('i8')", + "docstring": "Integer representation of the values. Returns ------- ndarray An ndarray with int64 dtype.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:asi8 arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_assert_float_dtype", + "source_code": "def _assert_float_dtype(dtype):\n if not dtype.is_floating:\n raise ValueError(f'Argument `dtype` is expected to be floating point. Received: {dtype}.')\n return dtype", + "docstring": "Validate and return floating point type based on . must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. 
Raises: ValueError: if is not a floating point type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:_assert_float_dtype arg:dtype arguments arg If Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "parse_args", + "source_code": "def parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(description=' System-level Usage Logger ')\n parser.add_argument('--debug', action='store_true', help='Enable debug mode')\n parser.add_argument('--log-interval', type=float, default=5, help='set time interval for logging utilization data, default is 5 seconds')\n parser.add_argument('--data-collect-interval', type=float, default=1, help='set time interval to collect data, default is 1 second, this should not longer than log_interval')\n args = parser.parse_args()\n return args", + "docstring": "Parse command line arguments. Returns: argparse.Namespace: Parsed arguments.", + "type": "function", + "file_path": "pytorch\\tools\\stats\\monitor.py", + "ast_data": "FunctionDef name:parse_args arguments Assign Call Call Call Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "replace", + "source_code": "def replace(obj, /, **changes):\n cls = obj.__class__\n func = getattr(cls, '__replace__', None)\n if func is None:\n raise TypeError(f'replace() does not support {cls.__name__} objects')\n return func(obj, **changes)", + "docstring": "Return a new object replacing specified fields with new values. This is especially useful for immutable objects, like named tuples or frozen dataclasses.", + "type": "function", + "file_path": "django\\django\\utils\\copy.py", + "ast_data": "FunctionDef name:replace arguments arg arg Assign Assign Call If Compare Raise Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "KFold", + "source_code": "class KFold(_UnsupportedGroupCVMixin, _BaseKFold):\n\n def __init__(self, n_splits=5, *, shuffle=False, random_state=None):\n super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)\n\n def _iter_test_indices(self, X, y=None, groups=None):\n n_samples = _num_samples(X)\n indices = np.arange(n_samples)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_splits = self.n_splits\n fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int)\n fold_sizes[:n_samples % n_splits] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = (current, current + fold_size)\n yield indices[start:stop]\n current = stop", + "docstring": "K-Fold cross-validator. Provides train/test indices to split data in train/test sets. Split dataset into k consecutive folds (without shuffling by default). Each fold is then used once as a validation while the k - 1 remaining folds form the training set. Read more in the :ref:. For visualisation of cross-validation behaviour and comparison between common scikit-learn split methods refer to :ref: Parameters ---------- n_splits : int, default=5 Number of folds. Must be at least 2. .. versionchanged:: 0.22 `shufflerandom_stateGlossary random_state` to an integer. See Also -------- StratifiedKFold : Takes class information into account to avoid building folds with imbalanced class distributions (for binary or multiclass classification tasks). GroupKFold : K-fold iterator variant with non-overlapping groups. 
RepeatedKFold : Repeats K-Fold n times.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", + "ast_data": "ClassDef name:KFold FunctionDef name:__init__ arg:self arg:n_splits arguments arg arg arg arg Call Call FunctionDef name:_iter_test_indices arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Call Assign Call If Call Call Assign Assign Call Assign For Assign Assign" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, storage_writer: StorageWriter, storage_reader: StorageReader, *, process_group: Optional[dist.ProcessGroup]=None, coordinator_rank: int=0, no_dist: bool=False, load_planner: Optional[LoadPlanner]=None, save_planner: Optional[SavePlanner]=None):\n self.storage_writer = storage_writer\n self.storage_reader = storage_reader\n self.process_group = process_group\n self.coordinator_rank = coordinator_rank\n self.no_dist = no_dist\n self.load_planner = load_planner\n self.save_planner = save_planner", + "docstring": "Initializes the Checkpointer instance. Args: storage_writer: Instance of StorageWrite use to perform writes. storage_reader: StorageReader used to load data from. process_group: ProcessGroup to be used for cross-rank synchronization. coordinator_rank: Rank to use to coordinate the checkpoint. rank0 is used by default. no_dist: If ``) loader_planner: Instance of LoadPlanner to use when loading. save_planner: Instance of SavePlanner to use when saving.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\_checkpointer.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:storage_writer arg:storage_reader arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "def step(self):\n with torch.no_grad():\n for name, configs in self.data_groups.items():\n data = configs['data']\n self.update_mask(name, data, configs)\n self.data_groups[name].pop('data')", + "docstring": "Internally calls the update_mask() function for each layer", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", + "ast_data": "FunctionDef name:step arg:self arguments arg With Call For Call Assign Call Call" + }, + { + "library": "django", + "name": "_prepare_messages", + "source_code": "def _prepare_messages(self, messages):\n for message in messages:\n message._prepare()", + "docstring": "Prepare a list of messages for storage.", + "type": "method", + "file_path": "django\\django\\contrib\\messages\\storage\\base.py", + "ast_data": "FunctionDef name:_prepare_messages arg:self arg:messages arguments arg arg For Call" + }, + { + "library": "sphinx", + "name": "before_content", + "source_code": "def before_content(self) -> None:\n prefix = None\n if self.names:\n fullname, name_prefix = self.names[-1]\n if self.allow_nesting:\n prefix = fullname\n elif name_prefix:\n prefix = name_prefix.strip('.')\n if prefix:\n self.env.ref_context['py:class'] = prefix\n if self.allow_nesting:\n classes = self.env.ref_context.setdefault('py:classes', [])\n classes.append(prefix)\n if 'module' in self.options:\n modules = self.env.ref_context.setdefault('py:modules', [])\n modules.append(self.env.ref_context.get('py:module'))\n self.env.ref_context['py:module'] = self.options['module']", + "docstring": "Handle object nesting before content :py:class: represents Python language constructs. 
For constructs that are nestable, such as a Python classes, this method will build up a stack of the nesting hierarchy so that it can be later de-nested correctly, in :py:meth:. For constructs that aren't nestable, the stack is bypassed, and instead only the most recent object is tracked. This object prefix name will be removed with :py:meth:.", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\python\\_object.py", + "ast_data": "FunctionDef name:before_content arg:self arguments arg Assign If Assign If Assign If Assign Call If Assign If Assign Call Call If Compare Assign Call Call Call Assign" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, marker, fillstyle=None, transform=None, capstyle=None, joinstyle=None):\n self._marker_function = None\n self._user_transform = transform\n self._user_capstyle = CapStyle(capstyle) if capstyle is not None else None\n self._user_joinstyle = JoinStyle(joinstyle) if joinstyle is not None else None\n self._set_fillstyle(fillstyle)\n self._set_marker(marker)", + "docstring": "Parameters ---------- marker : str, array-like, Path, MarkerStyle - Another instance of copies the details of that *marker*. - For other possible marker values, see the module docstring . fillstyle : str, default: :rc: One of 'full', 'left', 'right', 'bottom', 'top', 'none'. transform : , optional Transform that will be combined with the native transform of the marker. capstyle : or %(CapStyle)s, optional Cap style that will override the default cap style of the marker. joinstyle : or %(JoinStyle)s, optional Join style that will override the default join style of the marker.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\markers.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:marker arg:fillstyle arg:transform arg:capstyle arg:joinstyle arguments arg arg arg arg arg arg Assign Assign Assign Compare Call Assign Compare Call Call Call" + }, + { + "library": "django", + "name": "as_sqlite", + "source_code": "def as_sqlite(self, compiler, connection, **extra_context):\n return super().as_sqlite(compiler, connection, function='MAX', **extra_context)", + "docstring": "Use the MAX function on SQLite.", + "type": "method", + "file_path": "django\\django\\db\\models\\functions\\comparison.py", + "ast_data": "FunctionDef name:as_sqlite arg:self arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_get_dist_to_box", + "source_code": "def _get_dist_to_box(self, rotation, x0, y0, figure_box):\n if rotation > 270:\n quad = rotation - 270\n h1 = (y0 - figure_box.y0) / math.cos(math.radians(quad))\n h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad))\n elif rotation > 180:\n quad = rotation - 180\n h1 = (x0 - figure_box.x0) / math.cos(math.radians(quad))\n h2 = (y0 - figure_box.y0) / math.cos(math.radians(90 - quad))\n elif rotation > 90:\n quad = rotation - 90\n h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad))\n h2 = (x0 - figure_box.x0) / math.cos(math.radians(90 - quad))\n else:\n h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation))\n h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation))\n return min(h1, h2)", + "docstring": "Return the distance from the given points to the boundaries of a rotated box, in pixels.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:_get_dist_to_box arg:self arg:rotation arg:x0 arg:y0 arg:figure_box arguments 
arg arg arg arg arg If Compare Assign Assign Call Call Assign Call Call If Compare Assign Assign Call Call Assign Call Call If Compare Assign Assign Call Call Assign Call Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "extract_valid_libs", + "source_code": "def extract_valid_libs(filepath):\n\n def repository_rule(**kwargs):\n del kwargs\n with open(filepath, 'r') as f:\n f_globals = {'repository_rule': repository_rule}\n f_locals = {}\n exec(f.read(), f_globals, f_locals)\n return set(f_locals['VALID_LIBS'])", + "docstring": "Evaluate syslibs_configure.bzl, return the VALID_LIBS set from that file.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\builds\\check_system_libs.py", + "ast_data": "FunctionDef name:extract_valid_libs arg:filepath arguments arg FunctionDef name:repository_rule arguments arg With Call Assign Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_cmap", + "source_code": "def get_cmap(self):\n return self._colorizer.cmap", + "docstring": "Return the instance.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py", + "ast_data": "FunctionDef name:get_cmap arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "AnchoredText", + "source_code": "class AnchoredText(AnchoredOffsetbox):\n\n def __init__(self, s, loc, *, pad=0.4, borderpad=0.5, prop=None, **kwargs):\n if prop is None:\n prop = {}\n badkwargs = {'va', 'verticalalignment'}\n if badkwargs & set(prop):\n raise ValueError('Mixing verticalalignment with AnchoredText is not supported.')\n self.txt = TextArea(s, textprops=prop)\n fp = self.txt._text.get_fontproperties()\n super().__init__(loc, pad=pad, borderpad=borderpad, child=self.txt, prop=fp, **kwargs)", + "docstring": "AnchoredOffsetbox with Text.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "ClassDef name:AnchoredText FunctionDef name:__init__ arg:self arg:s arg:loc arguments arg arg arg arg arg arg arg If Compare Assign Assign If Call Raise Call Assign Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "add_push_null_call_function_ex", + "source_code": "def add_push_null_call_function_ex(inst_or_insts: Union[Instruction, list[Instruction]]) -> list[Instruction]:\n if isinstance(inst_or_insts, Instruction):\n insts = [inst_or_insts]\n else:\n insts = inst_or_insts\n if sys.version_info < (3, 11):\n return insts\n idx = -1 if sys.version_info >= (3, 13) else 0\n if insts[idx].opname == 'LOAD_GLOBAL':\n assert insts[idx].arg is not None\n if insts[idx].arg & 1 == 0:\n insts[idx].arg |= 1\n return insts\n if sys.version_info >= (3, 13):\n insts = insts + [create_instruction('PUSH_NULL')]\n else:\n insts = [create_instruction('PUSH_NULL')] + insts\n return insts", + "docstring": "Like add_push_null, but the low bit of LOAD_ATTR/LOAD_SUPER_ATTR is not set, due to an expected CALL_FUNCTION_EX instruction.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:add_push_null_call_function_ex arg:inst_or_insts arguments arg If Call Assign Assign If Compare Return return:yes Assign Compare If Compare Compare If Compare Return return:yes If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "issubsctype", + "source_code": "@set_module('numpy')\ndef issubsctype(arg1, arg2):\n return 
issubclass(obj2sctype(arg1), obj2sctype(arg2))", + "docstring": "Determine if the first argument is a subclass of the second argument. Parameters ---------- arg1, arg2 : dtype or dtype specifier Data-types. Returns ------- out : bool The result. See Also -------- issctype, issubdtype, obj2sctype Examples -------- >>> from numpy._core import issubsctype >>> issubsctype('S8', str) False >>> issubsctype(np.array([1]), int) True >>> issubsctype(np.array([1]), float) False", + "type": "function", + "file_path": "numpy\\numpy\\_core\\numerictypes.py", + "ast_data": "FunctionDef name:issubsctype arg:arg1 arg:arg2 arguments arg arg Return return:yes Call Call Call Call" + }, + { + "library": "seaborn", + "name": "__init__", + "source_code": "def __init__(self, *, bw_method=None, bw_adjust=1, gridsize=200, cut=3, clip=None, cumulative=False):\n if clip is None:\n clip = (None, None)\n self.bw_method = bw_method\n self.bw_adjust = bw_adjust\n self.gridsize = gridsize\n self.cut = cut\n self.clip = clip\n self.cumulative = cumulative\n if cumulative and _no_scipy:\n raise RuntimeError('Cumulative KDE evaluation requires scipy')\n self.support = None", + "docstring": "Initialize the estimator with its parameters. Parameters ---------- bw_method : string, scalar, or callable, optional Method for determining the smoothing bandwidth to use; passed to :class:. bw_adjust : number, optional Factor that multiplicatively scales the value chosen using ``. Increasing will make the curve smoother. See Notes. gridsize : int, optional Number of points on each dimension of the evaluation grid. cut : number, optional Factor, multiplied by the smoothing bandwidth, that determines how far the evaluation grid extends past the extreme datapoints. When set to 0, truncate the curve at the data limits. clip : pair of numbers or None, or a pair of such pairs Do not evaluate the density outside of these limits. cumulative : bool, optional If True, estimate a cumulative distribution function. 
Requires scipy.", + "type": "method", + "file_path": "seaborn\\seaborn\\_statistics.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg If Compare Assign Assign Assign Assign Assign Assign Assign If BoolOp Raise Call Assign" + }, + { + "library": "pytorch", + "name": "python_type_for_torch", + "source_code": "def python_type_for_torch(dtyp):\n if dtyp.is_floating_point:\n typ = float\n elif dtyp.is_complex:\n typ = complex\n elif dtyp == torch.bool:\n typ = bool\n else:\n typ = int\n return typ", + "docstring": "Get a python scalar type a torch dtype", + "type": "function", + "file_path": "pytorch\\torch\\_numpy\\_dtypes_impl.py", + "ast_data": "FunctionDef name:python_type_for_torch arg:dtyp arguments arg If Assign If Assign If Compare Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "loc", + "source_code": "@property\ndef loc(self):\n return self._loc", + "docstring": "Distribution parameter for the mean.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\normal.py", + "ast_data": "FunctionDef name:loc arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "test_db_signature", + "source_code": "def test_db_signature(self):\n settings_dict = self.connection.settings_dict\n return (settings_dict['HOST'], settings_dict['PORT'], settings_dict['ENGINE'], self._get_test_db_name())", + "docstring": "Return a tuple with elements of self.connection.settings_dict (a DATABASES setting value) that uniquely identify a database accordingly to the RDBMS particularities.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\creation.py", + "ast_data": "FunctionDef name:test_db_signature arg:self arguments arg Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "identity", + "source_code": "def identity(self):\n flow = array_ops.identity(self._flow)\n return build_ta_with_new_flow(self, flow)", + "docstring": "See TensorArray.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:identity arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "paintEvent", + "source_code": "def paintEvent(self, event):\n self._draw_idle()\n if not hasattr(self, 'renderer'):\n return\n painter = QtGui.QPainter(self)\n try:\n rect = event.rect()\n width = rect.width() * self.device_pixel_ratio\n height = rect.height() * self.device_pixel_ratio\n left, top = self.mouseEventCoords(rect.topLeft())\n bottom = top - height\n right = left + width\n bbox = Bbox([[left, bottom], [right, top]])\n buf = memoryview(self.copy_from_bbox(bbox))\n if QT_API == 'PyQt6':\n from PyQt6 import sip\n ptr = int(sip.voidptr(buf))\n else:\n ptr = buf\n painter.eraseRect(rect)\n qimage = QtGui.QImage(ptr, buf.shape[1], buf.shape[0], QtGui.QImage.Format.Format_RGBA8888)\n qimage.setDevicePixelRatio(self.device_pixel_ratio)\n origin = QtCore.QPoint(rect.left(), rect.top())\n painter.drawImage(origin, qimage)\n if QT_API == 'PySide2' and QtCore.__version_info__ < (5, 12):\n ctypes.c_long.from_address(id(buf)).value = 1\n self._draw_rect_callback(painter)\n finally:\n painter.end()", + "docstring": "Copy the image from the Agg canvas to the qt.drawable. 
In Qt, all drawing should be done inside of here when a widget is shown onscreen.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_qtagg.py", + "ast_data": "FunctionDef name:paintEvent arg:self arg:event arguments arg arg Call If Call Return return:no Assign Call Try Assign Call Assign Call Assign Call Assign Call Call Assign Assign Assign Call Assign Call Call If Compare Assign Call Call Assign Call Assign Call Call Assign Call Call Call Call If BoolOp Compare Compare Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "enable_batch_variable_initialization", + "source_code": "def enable_batch_variable_initialization():\n return _EXPERIMENTAL_TPU_BATCH_VARIABLE_INITIALIZATION and context.executing_eagerly() and (not save_context.in_save_context())", + "docstring": "Whether to batch variable initialization in tf.function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:enable_batch_variable_initialization arguments Return return:yes BoolOp Call Call" + }, + { + "library": "pytorch", + "name": "create_load_python_module", + "source_code": "def create_load_python_module(self, mod) -> Instruction:\n output = self.tx.output\n global_scope = output.global_scope\n name = re.sub('^.*[.]', '', mod.__name__)\n if global_scope.get(name, None) is mod:\n return self.create_load_global(name, add=True)\n prefix = f'___module_{name}'\n global_name = self.tx.output.install_global_by_id(prefix, mod)\n return self.create_load_global(global_name, add=True)", + "docstring": "Generate a LOAD_GLOBAL instruction to fetch a given python module.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\codegen.py", + "ast_data": "FunctionDef name:create_load_python_module arg:self arg:mod arguments arg arg Assign Assign Assign Call If Compare Call Return return:yes Call Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "starting_wall_time", + "source_code": "def starting_wall_time(self):\n return self._reader.starting_wall_time()", + "docstring": "Wall timestamp for when the debugged TensorFlow program started. 
Returns: Stating wall time as seconds since the epoch, as a .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:starting_wall_time arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_cell", + "source_code": "def get_cell(self, *labels):\n if len(labels) != self._label_length:\n raise ValueError('The {} expects taking {} labels'.format(self._metric_name, self._label_length))\n return self._metric_methods[self._label_length].get_cell(self._metric, *labels)", + "docstring": "Retrieves the cell.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", + "ast_data": "FunctionDef name:get_cell arg:self arguments arg arg If Compare Call Raise Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "default", + "source_code": "def default(self, obj):\n if isinstance(obj, tensor_shape.TensorShape):\n items = obj.as_list() if obj.rank is not None else None\n return {'class_name': 'TensorShape', 'items': items}\n return get_json_type(obj)", + "docstring": "Encodes objects for types that aren't handled by the default encoder.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\json_utils.py", + "ast_data": "FunctionDef name:default arg:self arg:obj arguments arg arg If Call Assign Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "on_epoch_end", + "source_code": "def on_epoch_end(self, epoch, logs=None):\n self._log_epoch_metrics(epoch, logs)\n if self.histogram_freq and epoch % self.histogram_freq == 0:\n self._log_weights(epoch)\n if self.embeddings_freq and epoch % self.embeddings_freq == 0:\n self._log_embeddings(epoch)", + "docstring": "Runs metrics and histogram summaries at epoch end.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:on_epoch_end arg:self arg:epoch arg:logs arguments arg arg arg Call If BoolOp Compare Call If BoolOp Compare Call" + }, + { + "library": "django", + "name": "_generate_cache_header_key", + "source_code": "def _generate_cache_header_key(key_prefix, request):\n url = md5(request.build_absolute_uri().encode('ascii'), usedforsecurity=False)\n cache_key = 'views.decorators.cache.cache_header.%s.%s' % (key_prefix, url.hexdigest())\n return _i18n_cache_key_suffix(request, cache_key)", + "docstring": "Return a cache key for the header cache.", + "type": "function", + "file_path": "django\\django\\utils\\cache.py", + "ast_data": "FunctionDef name:_generate_cache_header_key arg:key_prefix arg:request arguments arg arg Assign Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "__new__", + "source_code": "@staticmethod\n@torch._disable_dynamo\ndef __new__(cls, local_tensor: torch.Tensor, spec: DTensorSpec, *, requires_grad: bool) -> 'DTensor':\n if local_tensor.requires_grad and (not requires_grad):\n warnings.warn(\"To construct DTensor from torch.Tensor, it's recommended to use local_tensor.detach() and make requires_grad consistent.\")\n assert spec.tensor_meta is not None, 'TensorMeta should not be None!'\n r = torch.Tensor._make_wrapper_subclass(cls, spec.tensor_meta.shape, strides=spec.tensor_meta.stride, dtype=local_tensor.dtype, device=local_tensor.device, layout=local_tensor.layout, requires_grad=requires_grad)\n r._spec = spec\n r._local_tensor = local_tensor\n 
return r", + "docstring": "Construct a DTensor from a local tensor, device mesh, and placement and other tensor properties (i.e. shape, requires_grad, strides, etc). .. note:: This is not a public API and it's only supposed to be used by the operator implementations and internals. If you want to construct a DTensor from a local tensor, consider using ``.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py", + "ast_data": "FunctionDef name:__new__ arg:cls arg:local_tensor arg:spec arguments arg arg arg arg If BoolOp Call Compare Assign Call Assign Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "Page", + "source_code": "class Page:\n title = 'Untitled Page'\n\n def header(self):\n return '\\n \\n \\n %s\\n \\n \\n
%s
\\n ' % (self.title, self.title)\n\n def footer(self):\n return '\\n \\n \\n '", + "docstring": "Web page base class.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut05_derived_objects.py", + "ast_data": "ClassDef name:Page Assign FunctionDef name:header arg:self arguments arg Return return:yes FunctionDef name:footer arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "capabilities", + "source_code": "def capabilities(self):\n return {'boolean indexing': True, 'data-dependent shapes': True, 'max dimensions': 64}", + "docstring": "Return a dictionary of array API library capabilities. The resulting dictionary has the following keys: - **\"boolean indexing\"**: boolean indicating whether an array library supports boolean indexing. Always `` for CuPy. See for more details. See Also -------- __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- capabilities : dict A dictionary of array API library capabilities. Examples -------- >>> info = xp.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, 'data-dependent shapes': True, 'max dimensions': 64}", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_info.py", + "ast_data": "FunctionDef name:capabilities arg:self arguments arg Return return:yes" + }, + { + "library": "sphinx", + "name": "get_signature_prefix", + "source_code": "def get_signature_prefix(self, sig: str) -> Sequence[nodes.Node]:\n return []", + "docstring": "May return a prefix to put before the object name in the signature.", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\python\\_object.py", + "ast_data": "FunctionDef name:get_signature_prefix arg:self arg:sig arguments arg arg Return return:no" + }, + { + "library": "tensorflow", + "name": "track_tf_optimizer", + "source_code": "def track_tf_optimizer(tf_optimizer):\n if context.executing_eagerly():\n return\n optimizers = _GRAPH_TF_OPTIMIZERS[None]\n optimizers.add(tf_optimizer)", + "docstring": "Tracks the given TF optimizer for initialization of its variables.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:track_tf_optimizer arg:tf_optimizer arguments arg If Call Return return:no Assign Call" + }, + { + "library": "tensorflow", + "name": "op", + "source_code": "@property\ndef op(self):\n return self._op", + "docstring": "The operation that failed, if known. *N.B.* If the failed op was synthesized at runtime, e.g. a or op, there will be no corresponding object. In that case, this will return , and you should instead use the to discover information about the op. Returns: The that failed, or None.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_PhantomRoot", + "source_code": "@dataclasses.dataclass\nclass _PhantomRoot:\n name: str\n constraint_range: 'StrictMinMaxConstraint'\n val: int", + "docstring": "This represents the root of a derived Dim where the root does not directly specify the shape of any input dimension, but the derived Dim does. e.g., the input shapes 2*dim and dim + 1 are related via a \"phantom\" dim. The fields , , and carried by a phantom root help create a symbol for it. 
Any derived dims with this phantom root are backed by expressions over this symbol.", + "type": "class", + "file_path": "pytorch\\torch\\export\\dynamic_shapes.py", + "ast_data": "ClassDef name:_PhantomRoot" + }, + { + "library": "tensorflow", + "name": "_global_distribute_strategy_scope", + "source_code": "@property\ndef _global_distribute_strategy_scope(self):\n if not hasattr(self._thread_local, 'distribute_strategy_scope'):\n self._thread_local.distribute_strategy_scope = None\n return self._thread_local.distribute_strategy_scope", + "docstring": "For implementing .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_global_distribute_strategy_scope arg:self arguments arg If Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "job_name", + "source_code": "@tf_export('experimental.dtensor.job_name', v1=[])\ndef job_name() -> str:\n return os.environ.get(_DT_JOB_NAME, 'localhost' if num_clients() == 1 else 'worker')", + "docstring": "Returns the job name used by all clients in this DTensor cluster.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py", + "ast_data": "FunctionDef name:job_name arguments Return return:yes Call Compare Call Call" + }, + { + "library": "tensorflow", + "name": "combined_commuting_positive_definite_hint", + "source_code": "def combined_commuting_positive_definite_hint(operator_a, operator_b):\n if operator_a.is_positive_definite is True and operator_a.is_self_adjoint is True and (operator_b.is_positive_definite is True) and (operator_b.is_self_adjoint is True):\n return True\n return None", + "docstring": "Get combined PD hint for compositions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\property_hint_util.py", + "ast_data": "FunctionDef name:combined_commuting_positive_definite_hint arg:operator_a arg:operator_b arguments arg arg If BoolOp Compare Compare Compare Compare Return return:yes Return return:no" + }, + { + "library": "cherrypy", + "name": "run", + "source_code": "def run(self, point):\n self.run_hooks(iter(sorted(self[point])))", + "docstring": "Execute all registered Hooks (callbacks) for the given point.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cprequest.py", + "ast_data": "FunctionDef name:run arg:self arg:point arguments arg arg Call Call Call" + }, + { + "library": "pytorch", + "name": "build_ArmComputeLibrary", + "source_code": "def build_ArmComputeLibrary() -> None:\n print('Building Arm Compute Library')\n acl_build_flags = ['debug=0', 'neon=1', 'opencl=0', 'os=linux', 'openmp=1', 'cppthreads=0', 'arch=armv8a', 'multi_isa=1', 'fixed_format_kernels=1', 'build=native']\n acl_install_dir = '/acl'\n acl_checkout_dir = os.getenv('ACL_SOURCE_DIR', 'ComputeLibrary')\n if os.path.isdir(acl_install_dir):\n shutil.rmtree(acl_install_dir)\n if not os.path.isdir(acl_checkout_dir) or not len(os.listdir(acl_checkout_dir)):\n check_call(['git', 'clone', 'https://github.com/ARM-software/ComputeLibrary.git', '-b', 'v25.02', '--depth', '1', '--shallow-submodules'])\n check_call(['scons', 'Werror=1', f'-j{os.cpu_count()}'] + acl_build_flags, cwd=acl_checkout_dir)\n for d in ['arm_compute', 'include', 'utils', 'support', 'src', 'build']:\n shutil.copytree(f'{acl_checkout_dir}/{d}', f'{acl_install_dir}/{d}')", + "docstring": "Using ArmComputeLibrary for aarch64 PyTorch", + "type": "function", + "file_path": "pytorch\\.ci\\aarch64_linux\\aarch64_wheel_ci_build.py", + 
"ast_data": "FunctionDef name:build_ArmComputeLibrary arguments Call Assign Assign Assign Call If Call Call If BoolOp Call Call Call Call Call Call For Call" + }, + { + "library": "pytorch", + "name": "_compare_onnx_pytorch_model", + "source_code": "def _compare_onnx_pytorch_model(pt_model: _ModelType, onnx_model_f: str | io.BytesIO, input_args: _InputArgsType, input_kwargs: _InputKwargsType | None, additional_test_inputs: Sequence[_InputArgsType] | None, options: VerificationOptions):\n onnx_session = _onnx_backend_session(onnx_model_f, options.backend)\n\n def compare_onnx_pytorch_model_with_input(input_args, input_kwargs):\n pt_args, pt_kwargs = _prepare_input_for_pytorch(input_args, input_kwargs)\n pt_model_copy = _try_clone_model(pt_model)\n pt_outs = pt_model_copy(*pt_args, **pt_kwargs)\n onnx_inputs = _prepare_input_for_onnx(input_args, input_kwargs, options.remained_onnx_input_idx, options.flatten)\n onnx_outs = _run_onnx(onnx_session, onnx_inputs)\n _compare_onnx_pytorch_outputs(onnx_outs=onnx_outs, pt_outs=pt_outs, options=options)\n compare_onnx_pytorch_model_with_input(input_args, input_kwargs)\n if additional_test_inputs:\n for test_input_args in additional_test_inputs:\n compare_onnx_pytorch_model_with_input(test_input_args, {})", + "docstring": "Compare outputs from ONNX model runs with outputs from PyTorch model runs. Args: pt_model: PyTorch model. onnx_model_f: ONNX model file path or file-like object. input_args: positional arguments for PyTorch model forward method. input_kwargs: keyword arguments for PyTorch model forward method. additional_test_inputs: additional positional arguments for PyTorch model forward method. options: options for verification. Raises: AssertionError: if outputs from ONNX model and PyTorch model are not equal up to specified precision.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:_compare_onnx_pytorch_model arg:pt_model arg:onnx_model_f arg:input_args arg:input_kwargs arg:additional_test_inputs arg:options arguments arg arg arg arg arg arg Assign Call FunctionDef name:compare_onnx_pytorch_model_with_input arg:input_args arg:input_kwargs arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Call If For Call" + }, + { + "library": "scikit-learn", + "name": "make_nonnegative", + "source_code": "def make_nonnegative(X, min_value=0):\n min_ = X.min()\n if min_ < min_value:\n if sparse.issparse(X):\n raise ValueError('Cannot make the data matrix nonnegative because it is sparse. Adding a value to every entry would make it no longer sparse.')\n X = X + (min_value - min_)\n return X", + "docstring": "Ensure >= . Parameters ---------- X : array-like The matrix to make non-negative. min_value : float, default=0 The threshold value. Returns ------- array-like The thresholded array. 
Raises ------ ValueError When X is sparse.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\extmath.py", + "ast_data": "FunctionDef name:make_nonnegative arg:X arg:min_value arguments arg arg Assign Call If Compare If Call Raise Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "create_description", + "source_code": "def create_description(self, complib, complevel: int | None, fletcher32: bool, expectedrows: int | None) -> dict[str, Any]:\n if expectedrows is None:\n expectedrows = max(self.nrows_expected, 10000)\n d = {'name': 'table', 'expectedrows': expectedrows}\n d['description'] = {a.cname: a.typ for a in self.axes}\n if complib:\n if complevel is None:\n complevel = self._complevel or 9\n filters = _tables().Filters(complevel=complevel, complib=complib, fletcher32=fletcher32 or self._fletcher32)\n d['filters'] = filters\n elif self._filters is not None:\n d['filters'] = self._filters\n return d", + "docstring": "create the description of the table from the axes & values", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:create_description arg:self arg:complib arg:complevel arg:fletcher32 arg:expectedrows arguments arg arg arg arg arg If Compare Assign Call Assign Assign If If Compare Assign BoolOp Assign Call Call BoolOp Assign If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_or_restore_slot_variable", + "source_code": "def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable):\n named_slots = self._slot_dict(slot_name)\n variable_key = _var_key(variable)\n slot_variable = named_slots.get(variable_key, None)\n if slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() and (not ops.get_default_graph()._variable_creator_stack):\n initializer = trackable.CheckpointInitialValueCallable(checkpoint_position=slot_variable_position)\n slot_variable = self._get_or_make_slot_with_initializer(var=variable, initializer=initializer, shape=variable.shape, dtype=variable.dtype, slot_name=slot_name, op_name=self._name)\n if slot_variable is not None:\n slot_variable_position.restore(slot_variable)\n else:\n self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(variable_key, []).append(slot_variable_position)", + "docstring": "Restore a slot variable's value, possibly creating it. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. This method is nonetheless important when graph building for the case when a slot variable has already been created but has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A object indicating the slot variable object to be restored. slot_name: The name of this 's slot to restore into. 
variable: The variable object this slot is being created for.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_create_or_restore_slot_variable arg:self arg:slot_variable_position arg:slot_name arg:variable arguments arg arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Call Call Call Assign Call Assign Call If Compare Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "mode", + "source_code": "@property\ndef mode(self):\n return self.__mode", + "docstring": "Returns the mode in which the file was opened.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:mode arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "IteratorDecorator", + "source_code": "class IteratorDecorator:\n\n def __init__(self, iterator, datapipe, iterator_id, has_next_method):\n self.iterator = iterator\n self.datapipe = datapipe\n self.iterator_id = iterator_id\n self._profiler_enabled = torch.autograd._profiler_enabled()\n self.self_and_has_next_method = self.iterator is self.datapipe and has_next_method\n\n def __iter__(self):\n return self\n\n def _get_next(self):\n _check_iterator_valid(self.datapipe, self.iterator_id)\n result = next(self.iterator)\n if not self.self_and_has_next_method:\n self.datapipe._number_of_samples_yielded += 1\n return result\n\n def __next__(self):\n if self._profiler_enabled:\n with profiler_record_fn_context(self.datapipe):\n return self._get_next()\n else:\n return self._get_next()\n\n def __getattr__(self, name):\n return getattr(self.iterator, name)", + "docstring": "Wrap the iterator and modifying its method. This decorator is applied to DataPipes of which method is NOT a generator function. Those method commonly returns but not necessarily.", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_hook_iterator.py", + "ast_data": "ClassDef name:IteratorDecorator FunctionDef name:__init__ arg:self arg:iterator arg:datapipe arg:iterator_id arg:has_next_method arguments arg arg arg arg arg Assign Assign Assign Assign Call Assign BoolOp Compare FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:_get_next arg:self arguments arg Call Assign Call If Return return:yes FunctionDef name:__next__ arg:self arguments arg If With Call Return return:yes Call Return return:yes Call FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "Constraint", + "source_code": "class Constraint:\n is_discrete = False\n event_dim = 0\n\n def check(self, value):\n raise NotImplementedError\n\n def __repr__(self):\n return self.__class__.__name__[1:] + '()'", + "docstring": "Abstract base class for constraints. A constraint object represents a region over which a variable is valid, e.g. within which a variable can be optimized. Attributes: is_discrete (bool): Whether constrained space is discrete. Defaults to False. event_dim (int): Number of rightmost dimensions that together define an event. 
The :meth: method will remove this many dimensions when computing validity.", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:Constraint Assign Assign FunctionDef name:check arg:self arg:value arguments arg arg Raise FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "__getitem__", + "source_code": "def __getitem__(self, item: PositionalIndexer) -> Self | Any:\n raise AbstractMethodError(self)", + "docstring": "Select a subset of self. Parameters ---------- item : int, slice, or ndarray * int: The position in 'self' to get. * slice: A slice object, where 'start', 'stop', and 'step' are integers or None * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' * list[int]: A list of int Returns ------- item : scalar or ExtensionArray Notes ----- For scalar `` is True.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:item arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "pack_sequence_as", + "source_code": "def pack_sequence_as(structure, flat_sequence):\n return nest_util.pack_sequence_as(nest_util.Modality.DATA, structure, flat_sequence, expand_composites=False)", + "docstring": "Returns a given flattened sequence packed into a nest. If is a scalar, must be a single-element list; in this case the return value is . Args: structure: tuple or list constructed of scalars and/or other tuples/lists, or a scalar. Note: numpy arrays are considered scalars. flat_sequence: flat sequence to pack. Returns: packed: converted to have the same recursive structure as . Raises: ValueError: If nest and structure have different element counts.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\util\\nest.py", + "ast_data": "FunctionDef name:pack_sequence_as arg:structure arg:flat_sequence arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "call_hook", + "source_code": "def call_hook(hook: Callable[..., Optional[torch.Tensor]], *args: Any, **kwargs: Any) -> torch.Tensor:\n result = hook(*args)\n if result is None:\n return args[0]\n elif kwargs.get('hook_type') == 'post_acc_grad_hook':\n raise RuntimeError('Tensor post accumulate grad hooks should return None.')\n return result", + "docstring": "Used by compiled autograd to handle hook returning None.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\external_utils.py", + "ast_data": "FunctionDef name:call_hook arg:hook arguments arg arg arg Assign Call If Compare Return return:yes If Compare Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_config", + "source_code": "def get_config(self):\n config = dict(zip(self._fields, self))\n config['categorical_column'] = serialization.serialize_feature_column(self.categorical_column)\n config['initializer'] = serialization._serialize_keras_object(self.initializer)\n return config", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "count_up_to", + "source_code": "@deprecated(None, 'Prefer Dataset.range instead.')\ndef count_up_to(self, limit):\n raise NotImplementedError", + "docstring": 
"Increments this variable until it reaches . When that Op is run it tries to increment the variable by . If incrementing the variable would bring it above then the Op raises the exception . If no error is raised, the Op outputs the value of the variable before the increment. This is essentially a shortcut for . Args: limit: value at which incrementing the variable raises an error. Returns: A that will hold the variable value before the increment. If no other Op modifies this variable, the values produced will all be distinct.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:count_up_to arg:self arg:limit arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_get_tensor_by_tf_output", + "source_code": "def _get_tensor_by_tf_output(self, tf_output) -> tensor_lib.Tensor:\n op = self._get_operation_by_tf_operation(tf_output.oper)\n return op.outputs[tf_output.index]", + "docstring": "Returns the representing . Note that there is only one such , i.e. multiple calls to this function with the same TF_Output value will always return the same object. Args: tf_output: A wrapped (the C API equivalent of ). Returns: The that represents .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_get_tensor_by_tf_output arg:self arg:tf_output arguments arg arg Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_final_estimator_has", + "source_code": "def _final_estimator_has(attr):\n\n def check(self):\n getattr(self._final_estimator, attr)\n return True\n return check", + "docstring": "Check that final_estimator has . Used together with in .", + "type": "function", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:_final_estimator_has arg:attr arguments arg FunctionDef name:check arg:self arguments arg Call Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "dst", + "source_code": "@_dispatch\ndef dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, orthogonalize=None):\n return (Dispatchable(x, np.ndarray),)", + "docstring": "Return the Discrete Sine Transform of arbitrary type sequence x. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. n : int, optional Length of the transform. If `xx~scipy.fft.fftdstidstdstidstn=-1n=N2(N+1)n=-1/2n=N-1/2k=-1k=N-1\\sqrt{2}n=-1n=N-1\\sqrt{2}2Nn=-0.5n=N-0.52N`. The orthonormalized DST-IV is exactly its own inverse. Examples -------- Compute the DST of a simple 1D array: >>> import numpy as np >>> from scipy.fft import dst >>> x = np.array([1, -1, 1, -1]) >>> dst(x, type=2) array([0., 0., 0., 8.]) This computes the Discrete Sine Transform (DST) of type-II for the input array. The output contains the transformed values corresponding to the given input sequence References ---------- .. 
[1] Wikipedia, \"Discrete sine transform\",", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_realtransforms.py", + "ast_data": "FunctionDef name:dst arg:x arg:type arg:n arg:axis arg:norm arg:overwrite_x arg:workers arg:orthogonalize arguments arg arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "solve_constraints", + "source_code": "def solve_constraints(self):\n self.passes = _topological_sort_passes(self.passes, self.constraints)\n self._validated = True", + "docstring": "Finds a valid traversal order based on the given constraints and orders the passes based on this order. If a circular dependency exists between the constraints and steps = 1, then we will raise an error because if steps != 1 this means that we will re-run the passes, allowing for circular dependencies.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py", + "ast_data": "FunctionDef name:solve_constraints arg:self arguments arg Assign Call Assign" + }, + { + "library": "pandas", + "name": "asfreq", + "source_code": "@final\n@doc(klass=_shared_doc_kwargs['klass'])\ndef asfreq(self, freq: Frequency, method: FillnaOptions | None=None, how: Literal['start', 'end'] | None=None, normalize: bool=False, fill_value: Hashable | None=None) -> Self:\n from pandas.core.resample import asfreq\n return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value)", + "docstring": "Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:, the new index is the result of transforming the original index with :meth: (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to `pandas.date_rangeresamplethis link`. >>> df.asfreq(freq=\"30s\", method=\"bfill\") s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:asfreq arg:self arg:freq arg:method arg:how arg:normalize arg:fill_value arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "is_available", + "source_code": "def is_available():\n return torch._nnpack_available()", + "docstring": "Return whether PyTorch is built with NNPACK support.", + "type": "function", + "file_path": "pytorch\\torch\\backends\\nnpack\\__init__.py", + "ast_data": "FunctionDef name:is_available arguments Return return:yes Call" + }, + { + "library": "scipy", + "name": "pdf", + "source_code": "def pdf(self, x, alpha):\n alpha = _dirichlet_check_parameters(alpha)\n x = _dirichlet_check_input(alpha, x)\n out = np.exp(self._logpdf(x, alpha))\n return _squeeze_output(out)", + "docstring": "The Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. 
%(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray or scalar The probability density function evaluated at .", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:pdf arg:self arg:x arg:alpha arguments arg arg arg Assign Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "LassoLars", + "source_code": "class LassoLars(Lars):\n _parameter_constraints: dict = {**Lars._parameter_constraints, 'alpha': [Interval(Real, 0, None, closed='left')], 'max_iter': [Interval(Integral, 0, None, closed='left')], 'positive': ['boolean']}\n _parameter_constraints.pop('n_nonzero_coefs')\n method = 'lasso'\n\n def __init__(self, alpha=1.0, *, fit_intercept=True, verbose=False, precompute='auto', max_iter=500, eps=np.finfo(float).eps, copy_X=True, fit_path=True, positive=False, jitter=None, random_state=None):\n self.alpha = alpha\n self.fit_intercept = fit_intercept\n self.max_iter = max_iter\n self.verbose = verbose\n self.positive = positive\n self.precompute = precompute\n self.copy_X = copy_X\n self.eps = eps\n self.fit_path = fit_path\n self.jitter = jitter\n self.random_state = random_state", + "docstring": "Lasso model fit with Least Angle Regression a.k.a. Lars. It is a Linear Model trained with an L1 prior as regularizer. The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:. Parameters ---------- alpha : float, default=1.0 Constant that multiplies the penalty term. Defaults to 1.0. `LinearRegressionyGlossary jittern_targetsn_targetsn_targetsfitn_features_in_fitX` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. lasso_path : Compute Lasso path with coordinate descent. Lasso : Linear Model trained with L1 prior as regularizer (aka the Lasso). LassoCV : Lasso linear model with iterative fitting along a regularization path. LassoLarsCV: Cross-validated Lasso, using the LARS algorithm. LassoLarsIC : Lasso model fit with Lars using BIC or AIC for model selection. sklearn.decomposition.sparse_encode : Sparse coding. Examples -------- >>> from sklearn import linear_model >>> reg = linear_model.LassoLars(alpha=0.01) >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1]) LassoLars(alpha=0.01) >>> print(reg.coef_) [ 0. 
-0.955]", + "type": "class", + "file_path": "scikit-learn\\sklearn\\linear_model\\_least_angle.py", + "ast_data": "ClassDef name:LassoLars Call Call Call Assign FunctionDef name:__init__ arg:self arg:alpha arguments arg arg arg arg arg arg arg arg arg arg arg arg Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "dfs_helper", + "source_code": "def dfs_helper(node: Node, partition_latency) -> PartitionLatency:\n node_latency = node_to_latency_mapping[node]\n overall_latency_sec = partition_latency.overall_latency_sec + max(node_latency.computer_latency_sec, node_latency.mem_latency_sec)\n mem_latency_sec = partition_latency.mem_latency_sec + node_latency.mem_latency_sec\n computer_latency_sec = partition_latency.computer_latency_sec + node_latency.computer_latency_sec\n users = set(node.users).intersection(partition.nodes)\n if users:\n max_latency = PartitionLatency(mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0)\n for n in users:\n new_partition_latency = dfs_helper(n, PartitionLatency(mem_latency_sec, computer_latency_sec, overall_latency_sec))\n if new_partition_latency.overall_latency_sec > max_latency.overall_latency_sec:\n max_latency = new_partition_latency\n return max_latency\n return PartitionLatency(mem_latency_sec, computer_latency_sec, overall_latency_sec)", + "docstring": "Given a top node of a partition, this function returns the latency of the critical path in the partition", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py", + "ast_data": "FunctionDef name:dfs_helper arg:node arg:partition_latency arguments arg arg Assign Assign Call Assign Assign Assign Call Call If Assign Call For Assign Call Call If Compare Assign Return return:yes Return return:yes Call" + }, + { + "library": "authlib", + "name": "validate_nbf", + "source_code": "def validate_nbf(self, now, leeway):\n if 'nbf' in self:\n nbf = self['nbf']\n if not _validate_numeric_time(nbf):\n raise InvalidClaimError('nbf')\n if nbf > now + leeway:\n raise InvalidTokenError()", + "docstring": "The \"nbf\" (not before) claim identifies the time before which the JWT MUST NOT be accepted for processing. The processing of the \"nbf\" claim requires that the current date/time MUST be after or equal to the not-before date/time listed in the \"nbf\" claim. Implementers MAY provide for some small leeway, usually no more than a few minutes, to account for clock skew. Its value MUST be a number containing a NumericDate value. 
Use of this claim is OPTIONAL.", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py", + "ast_data": "FunctionDef name:validate_nbf arg:self arg:now arg:leeway arguments arg arg arg If Compare Assign If Call Raise Call If Compare Raise Call" + }, + { + "library": "matplotlib", + "name": "_get_ticks_position", + "source_code": "def _get_ticks_position(self):\n representative_ticks = []\n if not isinstance(self.get_major_locator(), NullLocator):\n representative_ticks.append(self.majorTicks[0])\n if not isinstance(self.get_minor_locator(), NullLocator):\n representative_ticks.append(self.minorTicks[0])\n if all((tick.tick1line.get_visible() and (not tick.tick2line.get_visible()) and tick.label1.get_visible() and (not tick.label2.get_visible()) for tick in representative_ticks)):\n return 1\n elif all((tick.tick2line.get_visible() and (not tick.tick1line.get_visible()) and tick.label2.get_visible() and (not tick.label1.get_visible()) for tick in representative_ticks)):\n return 2\n elif all((tick.tick1line.get_visible() and tick.tick2line.get_visible() and tick.label1.get_visible() and (not tick.label2.get_visible()) for tick in representative_ticks)):\n return 'default'\n else:\n return 'unknown'", + "docstring": "Helper for and . Check the visibility of tick1line, label1, tick2line, and label2 on the first major and the first minor ticks, provided these ticks are used i.e. the corresponding locator is not a NullLocator, and return - 1 if only tick1line and label1 are visible (which corresponds to \"bottom\" for the x-axis and \"left\" for the y-axis); - 2 if only tick2line and label2 are visible (which corresponds to \"top\" for the x-axis and \"right\" for the y-axis); - \"default\" if only tick1line, tick2line and label1 are visible; - \"unknown\" otherwise.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:_get_ticks_position arg:self arguments arg Assign If Call Call Call If Call Call Call If Call BoolOp Call Call Call Call Return return:yes If Call BoolOp Call Call Call Call Return return:yes If Call BoolOp Call Call Call Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_wrap_define_function", + "source_code": "def _wrap_define_function(original_function):\n\n def wrapper(*args, **kwargs):\n has_old_names = False\n for old_name, new_name in _RENAMED_ARGUMENTS.items():\n if old_name in kwargs:\n has_old_names = True\n value = kwargs.pop(old_name)\n kwargs[new_name] = value\n if has_old_names:\n _logging.warning('Use of the keyword argument names (flag_name, default_value, docstring) is deprecated, please use (name, default, help) instead.')\n return original_function(*args, **kwargs)\n return tf_decorator.make_decorator(original_function, wrapper)", + "docstring": "Wraps absl.flags's define functions so tf.flags accepts old names.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\flags.py", + "ast_data": "FunctionDef name:_wrap_define_function arg:original_function arguments arg FunctionDef name:wrapper arguments arg arg Assign For Call If Compare Assign Assign Call Assign If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "to_proto", + "source_code": "def to_proto(self, export_scope=None):\n if export_scope is None:\n return self.saver_def\n if not (self.saver_def.filename_tensor_name.startswith(export_scope) and self.saver_def.save_tensor_name.startswith(export_scope) and 
self.saver_def.restore_op_name.startswith(export_scope)):\n return None\n saver_def = saver_pb2.SaverDef()\n saver_def.CopyFrom(self.saver_def)\n saver_def.filename_tensor_name = ops.strip_name_scope(saver_def.filename_tensor_name, export_scope)\n saver_def.save_tensor_name = ops.strip_name_scope(saver_def.save_tensor_name, export_scope)\n saver_def.restore_op_name = ops.strip_name_scope(saver_def.restore_op_name, export_scope)\n return saver_def", + "docstring": "Converts this to a protocol buffer. Args: export_scope: Optional . Name scope to remove. Returns: A protocol buffer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg If Compare Return return:yes If BoolOp Call Call Call Return return:no Assign Call Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_pinned", + "source_code": "def is_pinned(self, device: Union[str, torch.device]='cuda'):\n _warn_typed_storage_removal()\n return self._untyped_storage.is_pinned(device)", + "docstring": "Determine whether the CPU TypedStorage is already pinned on device. Args: device (str or torch.device): The device to pin memory on (default: ``). This argument is discouraged and subject to deprecated. Returns: A boolean variable.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:is_pinned arg:self arg:device arguments arg arg Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "apply", + "source_code": "@staticmethod\n@abc.abstractmethod\ndef apply(data: Series | DataFrame | np.ndarray, func: AggFuncType, args: tuple, kwargs: dict[str, Any], decorator: Callable, axis: Axis):\n pass", + "docstring": "Executor method to run functions by an axis. 
While we can see `` is implemented accordingly.", + "type": "method", + "file_path": "pandas\\pandas\\core\\apply.py", + "ast_data": "FunctionDef name:apply arg:data arg:func arg:args arg:kwargs arg:decorator arg:axis arguments arg arg arg arg arg arg" + }, + { + "library": "matplotlib", + "name": "reset", + "source_code": "def reset(self):\n if np.any(self.val != self.valinit):\n self.set_val(self.valinit)", + "docstring": "Reset the slider to the initial value.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:reset arg:self arguments arg If Call Compare Call" + }, + { + "library": "tensorflow", + "name": "_maybe_assert_valid_concentration", + "source_code": "def _maybe_assert_valid_concentration(self, concentration, validate_args):\n if not validate_args:\n return concentration\n concentration = distribution_util.embed_check_categorical_event_shape(concentration)\n return control_flow_ops.with_dependencies([check_ops.assert_positive(concentration, message='Concentration parameter must be positive.')], concentration)", + "docstring": "Checks the validity of the concentration parameter.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet_multinomial.py", + "ast_data": "FunctionDef name:_maybe_assert_valid_concentration arg:self arg:concentration arg:validate_args arguments arg arg arg If Return return:yes Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "__getitem__", + "source_code": "def __getitem__(self, index):\n if 0 <= index < self.geom_count:\n return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)\n else:\n raise IndexError('Index out of range when accessing geometry in a collection: %s.' % index)", + "docstring": "Get the Geometry at the specified index.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Compare Return return:yes Call Call Call Raise Call" + }, + { + "library": "seaborn", + "name": "EdgeWidth", + "source_code": "class EdgeWidth(IntervalProperty):\n\n @property\n def default_range(self) -> tuple[float, float]:\n base = mpl.rcParams['patch.linewidth']\n return (base * 0.5, base * 2)", + "docstring": "Thickness of the edges on a patch mark, in points.", + "type": "class", + "file_path": "seaborn\\seaborn\\_core\\properties.py", + "ast_data": "ClassDef name:EdgeWidth FunctionDef name:default_range arg:self arguments arg Assign Return return:yes" + }, + { + "library": "kornia", + "name": "get_translation_matrix2d", + "source_code": "def get_translation_matrix2d(translations: Tensor) -> Tensor:\n transform: Tensor = eye_like(3, translations)[:, :2, :]\n transform[..., 2] += translations\n transform_h = convert_affinematrix_to_homography(transform)\n return transform_h", + "docstring": "Compose translation matrix from the components. Args: translations: tensor containing the translation vector with shape :math:. Returns: the affine transformation matrix :math:. .. 
note:: This function is often used in conjunction with :func:, :func:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py", + "ast_data": "FunctionDef name:get_translation_matrix2d arg:translations arguments arg Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "ImmutableList", + "source_code": "class ImmutableList(tuple):\n\n def __new__(cls, *args, warning='ImmutableList object is immutable.', **kwargs):\n self = tuple.__new__(cls, *args, **kwargs)\n self.warning = warning\n return self\n\n def complain(self, *args, **kwargs):\n raise AttributeError(self.warning)\n __delitem__ = complain\n __delslice__ = complain\n __iadd__ = complain\n __imul__ = complain\n __setitem__ = complain\n __setslice__ = complain\n append = complain\n extend = complain\n insert = complain\n pop = complain\n remove = complain\n sort = complain\n reverse = complain", + "docstring": "A tuple-like object that raises useful errors when it is asked to mutate. Example:: >>> a = ImmutableList(range(5), warning=\"You cannot mutate this.\") >>> a[3] = '4' Traceback (most recent call last): ... AttributeError: You cannot mutate this.", + "type": "class", + "file_path": "django\\django\\utils\\datastructures.py", + "ast_data": "ClassDef name:ImmutableList FunctionDef name:__new__ arg:cls arguments arg arg arg arg Assign Call Assign Return return:yes FunctionDef name:complain arg:self arguments arg arg arg Raise Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "python_properties", + "source_code": "@abc.abstractproperty\ndef python_properties(self):\n raise NotImplementedError", + "docstring": "Returns dictionary of python properties to save in the metadata. This dictionary must be serializable and deserializable to/from JSON. When loading, the items in this dict are used to initialize the object and define attributes in the revived object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py", + "ast_data": "FunctionDef name:python_properties arg:self arguments arg Raise" + }, + { + "library": "django", + "name": "format_subject", + "source_code": "def format_subject(self, subject):\n return subject.replace('\\n', '\\\\n').replace('\\r', '\\\\r')", + "docstring": "Escape CR and LF characters.", + "type": "method", + "file_path": "django\\django\\utils\\log.py", + "ast_data": "FunctionDef name:format_subject arg:self arg:subject arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "rank", + "source_code": "@tf_export('rank')\n@dispatch.add_dispatch_support\ndef rank(input, name=None):\n return rank_internal(input, name, optimize=True)", + "docstring": "Returns the rank of a tensor. See also . Returns a 0-D representing the rank of . For example: **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\" Args: input: A or . name: A name for the operation (optional). Returns: A of type . 
@compatibility(numpy) Equivalent to np.ndim @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:rank arg:input arg:name arguments arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "set_url", + "source_code": "def set_url(self, url):\n self._url = url", + "docstring": "Set the url for links in compatible backends.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:set_url arg:self arg:url arguments arg arg Assign" + }, + { + "library": "matplotlib", + "name": "tick_left", + "source_code": "def tick_left(self):\n label = True\n if 'label1On' in self._major_tick_kw:\n label = self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']\n self.set_ticks_position('left')\n self.set_tick_params(which='both', labelleft=label)", + "docstring": "Move ticks and ticklabels (if present) to the left of the Axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:tick_left arg:self arguments arg Assign If Compare Assign BoolOp Call Call" + }, + { + "library": "scikit-learn", + "name": "set_params", + "source_code": "def set_params(self, **kwargs):\n self._set_params('_transformers', **kwargs)\n return self", + "docstring": "Set the parameters of this estimator. Valid parameter keys can be listed with `transformersColumnTransformer`. Parameters ---------- **kwargs : dict Estimator parameters. Returns ------- self : ColumnTransformer This estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py", + "ast_data": "FunctionDef name:set_params arg:self arguments arg arg Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "set_params", + "source_code": "def set_params(self, **kwargs):\n estimator = kwargs.pop('estimator', None)\n if estimator is not None:\n self.estimator = estimator\n if kwargs:\n raise ValueError('You cannot set parameters of the inner estimator in a frozen estimator since calling `fit` has no effect. You can use `frozenestimator.estimator.set_params` to set parameters of the inner estimator.')", + "docstring": "Set the parameters of this estimator. The only valid key here is . You cannot set the parameters of the inner estimator. Parameters ---------- **kwargs : dict Estimator parameters. Returns ------- self : FrozenEstimator This estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\frozen\\_frozen.py", + "ast_data": "FunctionDef name:set_params arg:self arguments arg arg Assign Call If Compare Assign If Raise Call" + }, + { + "library": "pandas", + "name": "_reduce", + "source_code": "def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs):\n result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs)\n if isinstance(result, pa.Array):\n return type(self)(result)\n else:\n return result", + "docstring": "Return a scalar result of performing the reduction operation. Parameters ---------- name : str Name of the function, supported values are: { any, all, min, max, sum, mean, median, prod, std, var, sem, kurt, skew }. skipna : bool, default True If True, skip NaN values. **kwargs Additional keyword arguments passed to the reduction function. Currently, is the only supported kwarg. 
Returns ------- scalar Raises ------ TypeError : subclass does not define reductions", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:_reduce arg:self arg:name arguments arg arg arg arg arg Assign Call If Call Return return:yes Call Call Return return:yes" + }, + { + "library": "scrapy", + "name": "CrawlerRunner", + "source_code": "class CrawlerRunner(CrawlerRunnerBase):\n\n def __init__(self, settings: dict[str, Any] | Settings | None=None):\n super().__init__(settings)\n self._active: set[Deferred[None]] = set()\n\n def crawl(self, crawler_or_spidercls: type[Spider] | str | Crawler, *args: Any, **kwargs: Any) -> Deferred[None]:\n if isinstance(crawler_or_spidercls, Spider):\n raise ValueError('The crawler_or_spidercls argument cannot be a spider object, it must be a spider class (or a Crawler object)')\n crawler = self.create_crawler(crawler_or_spidercls)\n return self._crawl(crawler, *args, **kwargs)\n\n @inlineCallbacks\n def _crawl(self, crawler: Crawler, *args: Any, **kwargs: Any) -> Generator[Deferred[Any], Any, None]:\n self.crawlers.add(crawler)\n d = crawler.crawl(*args, **kwargs)\n self._active.add(d)\n try:\n yield d\n finally:\n self.crawlers.discard(crawler)\n self._active.discard(d)\n self.bootstrap_failed |= not getattr(crawler, 'spider', None)\n\n def stop(self) -> Deferred[Any]:\n return self._stop()\n\n @inlineCallbacks\n def join(self) -> Generator[Deferred[Any], Any, None]:\n while self._active:\n yield DeferredList(self._active)", + "docstring": "This is a convenient helper class that keeps track of, manages and runs crawlers inside an already setup :mod:. The CrawlerRunner object must be instantiated with a :class: object. This class shouldn't be needed (since Scrapy is responsible of using it accordingly) unless writing scripts that manually handle the crawling process. See :ref: for an example. This class provides Deferred-based APIs. Use :class: for modern coroutine APIs.", + "type": "class", + "file_path": "scrapy\\scrapy\\crawler.py", + "ast_data": "ClassDef name:CrawlerRunner FunctionDef name:__init__ arg:self arg:settings arguments arg arg Call Call Call FunctionDef name:crawl arg:self arg:crawler_or_spidercls arguments arg arg arg arg If Call Raise Call Assign Call Return return:yes Call FunctionDef name:_crawl arg:self arg:crawler arguments arg arg arg arg Call Assign Call Call Try Call Call Call FunctionDef name:stop arg:self arguments arg Return return:yes Call FunctionDef name:join arg:self arguments arg While Call" + }, + { + "library": "tensorflow", + "name": "validate_slicing_string", + "source_code": "def validate_slicing_string(slicing_string):\n return bool(re.search('^\\\\[(\\\\d|,|\\\\s|:)+\\\\]$', slicing_string))", + "docstring": "Validate a slicing string. Check if the input string contains only brackets, digits, commas and colons that are valid characters in numpy-style array slicing. Args: slicing_string: (str) Input slicing string to be validated. 
Returns: (bool) True if and only if the slicing string is valid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py", + "ast_data": "FunctionDef name:validate_slicing_string arg:slicing_string arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_lru_cache", + "source_code": "def _lru_cache(self, fn, maxsize=None):\n fn_cache = functools.lru_cache(maxsize)(fn)\n prior_len = len(self.replacements)\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n nonlocal prior_len\n if prior_len != len(self.replacements):\n prior_len = len(self.replacements)\n fn_cache.cache_clear()\n return fn_cache(*args, **kwargs)\n return wrapper", + "docstring": "Wrapper around functools.lru_cache that clears when replacements has been invalidated.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\sizevars.py", + "ast_data": "FunctionDef name:_lru_cache arg:self arg:fn arg:maxsize arguments arg arg arg Assign Call Call Assign Call FunctionDef name:wrapper arguments arg arg If Compare Call Assign Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, module):\n super().__init__()\n self.module = module", + "docstring": "Collapses input of dim T*N*H to (T*N)*H, and applies to a module. Allows handling of variable sequence lengths and minibatch sizes. :param module: Module to apply input to.", + "type": "method", + "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:module arguments arg arg Call Call Assign" + }, + { + "library": "tensorflow", + "name": "_ragged_tensor_mse", + "source_code": "@dispatch.dispatch_for_types(mean_squared_error, ragged_tensor.RaggedTensor)\ndef _ragged_tensor_mse(y_true, y_pred):\n return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)", + "docstring": "Implements support for handling RaggedTensors. Args: y_true: RaggedTensor truth values. shape = . y_pred: RaggedTensor predicted values. shape = . Returns: Mean squared error values. shape = . When the number of dimensions of the batch feature vector [d0, .. dN] is greater than one the return value is a RaggedTensor. Otherwise a Dense tensor with dimensions [batch_size] is returned.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:_ragged_tensor_mse arg:y_true arg:y_pred arguments arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "add_request", + "source_code": "def add_request(self, *, param, alias):\n if not request_is_alias(alias) and (not request_is_valid(alias)):\n raise ValueError(f\"The alias you're setting for `{param}` should be either a valid identifier or one of {{None, True, False}}, but given value is: `{alias}`\")\n if alias == param:\n alias = True\n if alias == UNUSED:\n if param in self._requests:\n del self._requests[param]\n else:\n raise ValueError(f\"Trying to remove parameter {param} with UNUSED which doesn't exist.\")\n else:\n self._requests[param] = alias\n return self", + "docstring": "Add request info for a metadata. Parameters ---------- param : str The property for which a request is set. alias : str, or {True, False, None} Specifies which metadata should be routed to - str: the name (or alias) of metadata given to a meta-estimator that should be routed to this parameter. 
- True: requested - False: not requested - None: error if passed", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", + "ast_data": "FunctionDef name:add_request arg:self arguments arg arg arg If BoolOp Call Call Raise Call If Compare Assign If Compare If Compare Raise Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_assert_all_ranks_match", + "source_code": "def _assert_all_ranks_match(values):\n ranks = [_get_all_ranks(st) for st in values]\n for other_ranks in ranks[1:]:\n if other_ranks != ranks[0]:\n raise ValueError('Ranks of sub-message do not match')", + "docstring": "Raises an error if the ranks of submessages are not identical.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:_assert_all_ranks_match arg:values arguments arg Assign Call For If Compare Raise Call" + }, + { + "library": "django", + "name": "localize_tag", + "source_code": "@register.tag('localize')\ndef localize_tag(parser, token):\n use_l10n = None\n bits = list(token.split_contents())\n if len(bits) == 1:\n use_l10n = True\n elif len(bits) > 2 or bits[1] not in ('on', 'off'):\n raise TemplateSyntaxError(\"%r argument should be 'on' or 'off'\" % bits[0])\n else:\n use_l10n = bits[1] == 'on'\n nodelist = parser.parse(('endlocalize',))\n parser.delete_first_token()\n return LocalizeNode(nodelist, use_l10n)", + "docstring": "Force or prevents localization of values. Sample usage:: {% localize off %} var pi = {{ 3.1415 }}; {% endlocalize %}", + "type": "function", + "file_path": "django\\django\\templatetags\\l10n.py", + "ast_data": "FunctionDef name:localize_tag arg:parser arg:token arguments arg arg Assign Assign Call Call If Compare Call Assign If BoolOp Compare Call Compare Raise Call Assign Compare Assign Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "create_rdzv_handler", + "source_code": "def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler:\n client = _create_etcd_client(params)\n etcd_prefix = params.get('etcd_prefix', '/torchelastic/p2p')\n rdzv = EtcdRendezvous(client=client, prefix=etcd_prefix, run_id=params.run_id, num_min_workers=params.min_nodes, num_max_workers=params.max_nodes, timeout=params.get_as_int('timeout', _DEFAULT_TIMEOUT), last_call_timeout=params.get_as_int('last_call_timeout', _DEFAULT_LAST_CALL_TIMEOUT))\n return EtcdRendezvousHandler(rdzv_impl=rdzv, local_addr=params.local_addr)", + "docstring": "Usage: :: rdzv_params = RendezvousParameters( backend=\"etcd\", endpoint=\"192.168.0.42:2379\", run_id=\"123\", min_nodes=4, max_nodes=8, timeout=300, last_call_timeout=30, etcd_prefix=\"custom_prefix\", protocol=\"https\", cacert=\"/etc/kubernetes/certs/ca.crt\", cert=\"/etc/kubernetes/certs/client.crt\", key=\"/etc/kubernetes/certs/client.key\") # -- or -- rdzv_params = RendezvousParameters( backend=\"etcd\", endpoint=\"192.168.0.42:2379\", run_id=\"123\", min_nodes=4, max_nodes=8) etcd_rdzv_handler = create_etcd_rendezvous_handler(rdzv_params) Where: run_id - unique id for this training job instance, min_nodes - min number of workers expected to join the rendezvous, max_nodes - max number of workers allowed to join the rendezvous, defaults to min_workers is not specified. timeout - total timeout within which next_rendezvous is expected to succeed; a RendezvousTimeoutError is raised otherwise; Defaults is 600 (10 minutes). 
last_call_timeout - additional wait amount (\"last call\") after min number of workers has been reached. Defaults to 30 seconds. etcd_prefix - path prefix (from etcd root), inside which all etcd nodes will be created. Default is \"/torchelastic/p2p\". protocol - http (default) or https to access etcd. cacert - CA cert to access etcd, only makes sense with https. cert - client cert to access etcd, only makes sense with https. key - client key to access etcd, only makes sense with https.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py", + "ast_data": "FunctionDef name:create_rdzv_handler arg:params arguments arg Assign Call Assign Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, shorthand_name=None):\n self._parents = {}\n self._invalid = self._INVALID_FULL\n self._shorthand_name = shorthand_name or ''", + "docstring": "Parameters ---------- shorthand_name : str A string representing the \"name\" of the transform. The name carries no significance other than to improve the readability of `` when DEBUG=True.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:shorthand_name arguments arg arg Assign Assign Assign BoolOp" + }, + { + "library": "pytorch", + "name": "apply", + "source_code": "def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n with self._pytree_extension_context:\n return super().apply(model_outputs, model=model)", + "docstring": "Flatten the model outputs, under the context of pytree extension.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\dynamo_graph_extractor.py", + "ast_data": "FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg With Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "find_peaks_cwt", + "source_code": "def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None, gap_thresh=None, min_length=None, min_snr=1, noise_perc=10, window_size=None):\n widths = np.atleast_1d(np.asarray(widths))\n if gap_thresh is None:\n gap_thresh = np.ceil(widths[0])\n if max_distances is None:\n max_distances = widths / 4.0\n if wavelet is None:\n wavelet = _ricker\n cwt_dat = _cwt(vector, wavelet, widths)\n ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)\n filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length, window_size=window_size, min_snr=min_snr, noise_perc=noise_perc)\n max_locs = np.asarray([x[1][0] for x in filtered])\n max_locs.sort()\n return max_locs", + "docstring": "Find peaks in a 1-D array with wavelet transformation. The general approach is to smooth by convolving it with for each width in . Relative maxima which appear at enough length scales, and with sufficiently high SNR, are accepted. Parameters ---------- vector : ndarray 1-D array in which to find the peaks. widths : float or sequence Single width or 1-D array-like of widths to use for calculating the CWT matrix. In general, this range should cover the expected width of peaks of interest. wavelet : callable, optional Should take two parameters and return a 1-D array to convolve with . The first parameter determines the number of points of the returned wavelet array, the second parameter is the scale () of the wavelet. Should be normalized and symmetric. 
Default is the ricker wavelet. max_distances : ndarray, optional At each row, a ridge line is only connected if the relative max at row[n] is within `max_distancesgap_threshnoise_percstats.scoreatpercentilevectorvectorwidthsvectorwavelet(width)widthscwt10.1093/bioinformatics/btl355` Examples -------- >>> import numpy as np >>> from scipy import signal >>> xs = np.arange(0, np.pi, 0.05) >>> data = np.sin(xs) >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10)) >>> peakind, xs[peakind], data[peakind] ([32], array([ 1.6]), array([ 0.9995736]))", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_peak_finding.py", + "ast_data": "FunctionDef name:find_peaks_cwt arg:vector arg:widths arg:wavelet arg:max_distances arg:gap_thresh arg:min_length arg:min_snr arg:noise_perc arg:window_size arguments arg arg arg arg arg arg arg arg arg Assign Call Call If Compare Assign Call If Compare Assign If Compare Assign Assign Call Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_dpi", + "source_code": "def set_dpi(self, val):\n self._parent.dpi = val\n self.stale = True", + "docstring": "Set the resolution of parent figure in dots-per-inch. Parameters ---------- val : float", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:set_dpi arg:self arg:val arguments arg arg Assign Assign" + }, + { + "library": "scipy", + "name": "ReentrancyLock", + "source_code": "class ReentrancyLock:\n\n def __init__(self, err_msg):\n self._rlock = threading.RLock()\n self._entered = False\n self._err_msg = err_msg\n\n def __enter__(self):\n self._rlock.acquire()\n if self._entered:\n self._rlock.release()\n raise ReentrancyError(self._err_msg)\n self._entered = True\n\n def __exit__(self, type, value, traceback):\n self._entered = False\n self._rlock.release()\n\n def decorate(self, func):\n\n def caller(func, *a, **kw):\n with self:\n return func(*a, **kw)\n return scipy._lib.decorator.decorate(func, caller)", + "docstring": "Threading lock that raises an exception for reentrant calls. Calls from different threads are serialized, and nested calls from the same thread result to an error. The object can be used as a context manager or to decorate functions via the decorate() method.", + "type": "class", + "file_path": "scipy\\scipy\\_lib\\_threadsafety.py", + "ast_data": "ClassDef name:ReentrancyLock FunctionDef name:__init__ arg:self arg:err_msg arguments arg arg Assign Call Assign Assign FunctionDef name:__enter__ arg:self arguments arg Call If Call Raise Call Assign FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Assign Call FunctionDef name:decorate arg:self arg:func arguments arg arg FunctionDef name:caller arg:func arguments arg arg arg With Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "histogram_raw", + "source_code": "def histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts):\n hist = HistogramProto(min=min, max=max, num=num, sum=sum, sum_squares=sum_squares, bucket_limit=bucket_limits, bucket=bucket_counts)\n return Summary(value=[Summary.Value(tag=name, histo=hist)])", + "docstring": "Output a protocol buffer with a histogram. The generated []( has one summary value containing a histogram for . Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. 
min: A float or int min value max: A float or int max value num: Int number of values sum: Float or int sum of all values sum_squares: Float or int sum of squares for all values bucket_limits: A numeric with upper value per bucket bucket_counts: A numeric with number of values per bucket Returns: A scalar of type . The serialized protocol buffer.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py", + "ast_data": "FunctionDef name:histogram_raw arg:name arg:min arg:max arg:num arg:sum arg:sum_squares arg:bucket_limits arg:bucket_counts arguments arg arg arg arg arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "inv", + "source_code": "@property\ndef inv(self) -> 'Transform':\n inv = None\n if self._inv is not None:\n inv = self._inv()\n if inv is None:\n inv = _InverseTransform(self)\n self._inv = weakref.ref(inv)\n return inv", + "docstring": "Returns the inverse :class: of this transform. This should satisfy ``.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\transforms.py", + "ast_data": "FunctionDef name:inv arg:self arguments arg Assign If Compare Assign Call If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "on_error", + "source_code": "def on_error(*args, **kwargs):\n body = str(sys.exc_info()[1])\n _set_response(xmlrpc_dumps(XMLRPCFault(1, body)))", + "docstring": "Construct HTTP response body for an error response.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\xmlrpcutil.py", + "ast_data": "FunctionDef name:on_error arguments arg arg Assign Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "_lru_cache", + "source_code": "def _lru_cache(fn: Callable[..., _T], maxsize: Optional[int]=None) -> functools._lru_cache_wrapper[_T]:\n fn_cache = lru_cache(maxsize)(fn)\n prior_version = 0\n if config.validate_shape_env_version_key:\n prior_key = None\n\n @functools.wraps(fn)\n def wrapper(self: ShapeEnv, *args: Any, **kwargs: Any) -> _T:\n nonlocal prior_version, prior_key\n if prior_key is None:\n prior_key = self._get_key()\n if prior_version != self._version_counter:\n fn_cache.cache_clear()\n prior_version = self._version_counter\n prior_key = self._get_key()\n else:\n assert prior_key == self._get_key(), 'ShapeEnv cache key changed without version being updated!'\n return fn_cache(self, *args, **kwargs)\n else:\n\n @functools.wraps(fn)\n def wrapper(self: ShapeEnv, *args: Any, **kwargs: Any) -> _T:\n nonlocal prior_version\n if prior_version != self._version_counter:\n fn_cache.cache_clear()\n prior_version = self._version_counter\n return fn_cache(self, *args, **kwargs)\n wrapper.cache_clear = fn_cache.cache_clear\n wrapper.cache_info = fn_cache.cache_info\n return wrapper", + "docstring": "Wrapper around lru_cache that clears when new info about shapes has been updated. Use lru_cache if the output is always the same, regardless of the constraints we know now (i.e. evaluate_expr) Use _lru_cache otherwise. 
Also note that this depends on _update_version_counter being called on the shape environment whenever the constraints are updated, otherwise the cache will not be cleared.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:_lru_cache arg:fn arg:maxsize arguments arg arg Assign Call Call Assign If Assign FunctionDef name:wrapper arg:self arguments arg arg arg If Compare Assign Call If Compare Call Assign Assign Call Compare Call Return return:yes Call Call FunctionDef name:wrapper arg:self arguments arg arg arg If Compare Call Assign Return return:yes Call Call Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "shutdown", + "source_code": "@_require_initialized\ndef shutdown(graceful=True, timeout=DEFAULT_SHUTDOWN_TIMEOUT):\n if graceful:\n try:\n agent = _get_current_rpc_agent()\n if not isinstance(agent, TensorPipeAgent) or agent.is_static_group:\n _wait_all_workers(timeout)\n _delete_all_user_and_unforked_owner_rrefs()\n agent.join(shutdown=True, timeout=timeout)\n else:\n my_worker_info = agent.get_worker_info()\n my_name = my_worker_info.name\n with _group_membership_management(agent.store, my_name, False):\n all_worker_infos = agent.get_worker_infos()\n for worker in all_worker_infos:\n if worker.name != my_name:\n rpc_sync(worker.name, _update_group_membership, args=(my_worker_info, [], {}, False))\n agent.join(shutdown=True, timeout=timeout)\n finally:\n _finalize_shutdown()\n else:\n _finalize_shutdown()", + "docstring": "Perform a shutdown of the RPC agent, and then destroy the RPC agent. This stops the local agent from accepting outstanding requests, and shuts down the RPC framework by terminating all RPC threads. If `~torch.futures.Future~torch.distributed.rpc.rpc_async~torch.distributed.init_process_group` API for more details. For example, export MASTER_ADDR=localhost export MASTER_PORT=5678 Then run the following code in two different processes: >>> # xdoctest: +SKIP >>> # On worker 0: >>> import torch >>> import torch.distributed.rpc as rpc >>> rpc.init_rpc(\"worker0\", rank=0, world_size=2) >>> # do some work >>> result = rpc.rpc_sync(\"worker1\", torch.add, args=(torch.ones(1), 1)) >>> # ready to shutdown >>> rpc.shutdown() >>> # On worker 1: >>> import torch.distributed.rpc as rpc >>> rpc.init_rpc(\"worker1\", rank=1, world_size=2) >>> # wait for worker 0 to finish work, and then shutdown. >>> rpc.shutdown()", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\rpc\\api.py", + "ast_data": "FunctionDef name:shutdown arg:graceful arg:timeout arguments arg arg If Try Assign Call If BoolOp Call Call Call Call Assign Call Assign With Call Assign Call For If Compare Call Call Call Call" + }, + { + "library": "sphinx", + "name": "get_signatures", + "source_code": "def get_signatures(self) -> list[str]:\n lines = nl_escape_re.sub('', self.arguments[0]).split('\\n')\n if self.config.strip_signature_backslash:\n return [strip_backslash_re.sub('\\\\1', line.strip()) for line in lines]\n else:\n return [line.strip() for line in lines]", + "docstring": "Retrieve the signatures to document from the directive arguments. 
By default, signatures are given as arguments, one per line.", + "type": "method", + "file_path": "sphinx\\sphinx\\directives\\__init__.py", + "ast_data": "FunctionDef name:get_signatures arg:self arguments arg Assign Call Call If Return return:yes Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "__rfloordiv__", + "source_code": "def __rfloordiv__(self, other):\n return floor_divide(other, self)", + "docstring": "Divide self into other, and return a new masked array.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__rfloordiv__ arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "query_user_grant", + "source_code": "def query_user_grant(self, user_code):\n raise NotImplementedError()", + "docstring": "Get user and grant via the given user code. Developers MUST implement it in subclass:: def query_user_grant(self, user_code): # e.g. we saved user grant info in redis data = redis.get(\"oauth_user_grant:\" + user_code) if not data: return None user_id, allowed = data.split() user = User.get(user_id) return user, bool(allowed) Note, user grant information is saved by verification endpoint.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8628\\device_code.py", + "ast_data": "FunctionDef name:query_user_grant arg:self arg:user_code arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "DynamoFlattenOutputStep", + "source_code": "class DynamoFlattenOutputStep(io_adapter.FlattenOutputStep):\n\n def __init__(self, pytree_extension_context: _PyTreeExtensionContext | None=None):\n super().__init__()\n self._pytree_extension_context = pytree_extension_context or _PyTreeExtensionContext()\n\n def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n with self._pytree_extension_context:\n return super().apply(model_outputs, model=model)", + "docstring": "Flatten nested collection and custom python types and return a flat list of elements. Extended from :class: to support flattening arbitrary types via pytree extension. By default this supports many common user defined python types such as :class: from HuggingFace transformers. The pytree extension can be customized by passing in a `_PyTreeExtensionContext.register_pytree_node`.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\dynamo_graph_extractor.py", + "ast_data": "ClassDef name:DynamoFlattenOutputStep FunctionDef name:__init__ arg:self arg:pytree_extension_context arguments arg arg Call Call Assign BoolOp Call FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg With Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "projections_from_fundamental", + "source_code": "def projections_from_fundamental(F_mat: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(F_mat, ['*', '3', '3'])\n R1 = eye_like(3, F_mat)\n t1 = vec_like(3, F_mat)\n Ft_mat = F_mat.transpose(-2, -1)\n _, e2 = _nullspace(Ft_mat)\n R2 = cross_product_matrix(e2) @ F_mat\n t2 = e2[..., :, None]\n P1 = torch.cat([R1, t1], dim=-1)\n P2 = torch.cat([R2, t2], dim=-1)\n return stack([P1, P2], dim=-1)", + "docstring": "Get the projection matrices from the Fundamental Matrix. Args: F_mat: the fundamental matrix with the shape :math:. 
Returns: The projection matrices with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py", + "ast_data": "FunctionDef name:projections_from_fundamental arg:F_mat arguments arg Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "zeros", + "source_code": "def zeros(*size, requires_grad: bool=False, dtype: Optional[torch.dtype]=None, layout: torch.layout=torch.strided, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None) -> DTensor:\n torch_size = normalize_to_torch_size(size)\n return _dtensor_init_helper(torch.zeros, torch_size, dtype=dtype, layout=layout, requires_grad=requires_grad, device_mesh=device_mesh, placements=placements)", + "docstring": "Returns a :class: filled with the scalar value 0. Args: size (int...): a sequence of integers defining the shape of the output :class:. Can be a variable number of arguments or a collection like a list or tuple. E.g.: zeros(1,2,3..) or zeros([1,2,3..]) or zeros((1,2,3..)) Keyword args: requires_grad (bool, optional): If autograd should record operations on the returned :class:. Default: `torch.dtypeDTensortorch.set_default_dtypetorch.layoutDTensorDeviceMeshPlacementDTensor` object on each rank", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py", + "ast_data": "FunctionDef name:zeros arguments arg arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "home", + "source_code": "def home(self):\n self.views[self.figure].home()\n self.positions[self.figure].home()", + "docstring": "Recall the first view and position from the stack.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:home arg:self arguments arg Call Call" + }, + { + "library": "scikit-learn", + "name": "is_pydata_sparse_array", + "source_code": "def is_pydata_sparse_array(x: object) -> TypeIs[sparse.SparseArray]:\n cls = cast(Hashable, type(x))\n return _issubclass_fast(cls, 'sparse', 'SparseArray')", + "docstring": "Return True if is an array from the package. This function does not import if it has not already been imported and is therefore cheap to use. 
See Also -------- array_namespace is_array_api_obj is_numpy_array is_cupy_array is_torch_array is_ndonnx_array is_dask_array is_jax_array", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", + "ast_data": "FunctionDef name:is_pydata_sparse_array arg:x arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "release", + "source_code": "def release(self, event):\n if not self.ignore(event) and self._eventpress:\n event = self._clean_event(event)\n self._eventrelease = event\n self._release(event)\n self._eventpress = None\n self._eventrelease = None\n self._state.discard('move')\n return True\n return False", + "docstring": "Button release event handler and validator.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:release arg:self arg:event arguments arg arg If BoolOp Call Assign Call Assign Call Assign Assign Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "override_error_code_in_rootcause_data", + "source_code": "def override_error_code_in_rootcause_data(self, rootcause_error_file: str, rootcause_error: dict[str, Any], error_code: int=0):\n if 'message' not in rootcause_error:\n logger.warning('child error file (%s) does not have field `message`. \\ncannot override error code: %s', rootcause_error_file, error_code)\n elif isinstance(rootcause_error['message'], str):\n logger.warning('child error file (%s) has a new message format. \\nskipping error code override', rootcause_error_file)\n else:\n rootcause_error['message']['errorCode'] = error_code", + "docstring": "Modify the rootcause_error read from the file, to correctly set the exit code.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\errors\\error_handler.py", + "ast_data": "FunctionDef name:override_error_code_in_rootcause_data arg:self arg:rootcause_error_file arg:rootcause_error arg:error_code arguments arg arg arg arg If Compare Call If Call Call Assign" + }, + { + "library": "scipy", + "name": "moderatec", + "source_code": "def moderatec(c):\n np.nan_to_num(c, copy=False, nan=CONSTRMAX)\n c = np.clip(c, -CONSTRMAX, CONSTRMAX)\n return c", + "docstring": "This function moderates the constraint value, the constraint demanding this value to be NONNEGATIVE. It replaces any value below -CONSTRMAX by -CONSTRMAX, and any NaN or value above CONSTRMAX by CONSTRMAX.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\evaluate.py", + "ast_data": "FunctionDef name:moderatec arg:c arguments arg Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_message_with_time", + "source_code": "def _message_with_time(source, message, time):\n start_message = '[%s] ' % source\n if time > 60:\n time_str = '%4.1fmin' % (time / 60)\n else:\n time_str = ' %5.1fs' % time\n end_message = ' %s, total=%s' % (message, time_str)\n dots_len = 70 - len(start_message) - len(end_message)\n return '%s%s%s' % (start_message, dots_len * '.', end_message)", + "docstring": "Create one line message for logging purposes. Parameters ---------- source : str String indicating the source or the reference of the message. message : str Short message. 
time : int Time in seconds.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_user_interface.py", + "ast_data": "FunctionDef name:_message_with_time arg:source arg:message arg:time arguments arg arg arg Assign If Compare Assign Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "send_eager_tracebacks", + "source_code": "def send_eager_tracebacks(destinations, origin_stack, send_source=True):\n _send_call_tracebacks(destinations, origin_stack, is_eager_execution=True, send_source=send_source)", + "docstring": "Send the tracebacks of an eager execution call to debug server(s). Args: destinations: gRPC destination addresses, a or a of s, e.g., \"localhost:4242\". If a , gRPC requests containing the same origin_stack: The traceback of the eager operation invocation. send_source: Whether the source files involved in the op tracebacks but outside the TensorFlow library are to be sent.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_remote.py", + "ast_data": "FunctionDef name:send_eager_tracebacks arg:destinations arg:origin_stack arg:send_source arguments arg arg arg Call" + }, + { + "library": "matplotlib", + "name": "get_aspect", + "source_code": "def get_aspect(self):\n return self._aspect", + "docstring": "Return the aspect ratio of the Axes scaling. This is either \"auto\" or a float giving the ratio of y/x-scale.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:get_aspect arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, output):\n self._output_type = _api.check_getitem({'path': 'vector', 'agg': 'raster', 'macosx': 'raster'}, output=output.lower())", + "docstring": "Create a MathTextParser for the given backend *output*. Parameters ---------- output : {\"path\", \"agg\"} Whether to return a (\"path\") or a (\"agg\", or its synonym \"macosx\").", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\mathtext.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:output arguments arg arg Assign Call Call" + }, + { + "library": "scikit-learn", + "name": "Tags", + "source_code": "@dataclass(slots=True)\nclass Tags:\n estimator_type: str | None\n target_tags: TargetTags\n transformer_tags: TransformerTags | None = None\n classifier_tags: ClassifierTags | None = None\n regressor_tags: RegressorTags | None = None\n array_api_support: bool = False\n no_validation: bool = False\n non_deterministic: bool = False\n requires_fit: bool = True\n _skip_test: bool = False\n input_tags: InputTags = field(default_factory=InputTags)", + "docstring": "Tags for the estimator. See :ref: for more information. Parameters ---------- estimator_type : str or None The type of the estimator. Can be one of: - \"classifier\" - \"regressor\" - \"transformer\" - \"clusterer\" - \"outlier_detector\" - \"density_estimator\" target_tags : :class: The target(y) tags. transformer_tags : :class: or None The transformer tags. classifier_tags : :class: or None The classifier tags. regressor_tags : :class: or None The regressor tags. array_api_support : bool, default=False Whether the estimator supports Array API compatible inputs. no_validation : bool, default=False Whether the estimator skips input-validation. This is only meant for stateless and dummy transformers! 
non_deterministic : bool, default=False Whether the estimator is not deterministic given a fixed `transformpredictpredict_probadecision_functionInputTags` The input data(X) tags.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_tags.py", + "ast_data": "ClassDef name:Tags Call Call" + }, + { + "library": "pytorch", + "name": "put", + "source_code": "def put(self, req: Any) -> None:\n if not self.alive():\n self.start()\n TuningProcess.send(req, self.write_pipe)", + "docstring": "Push a work item to the child process.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\autotune_process.py", + "ast_data": "FunctionDef name:put arg:self arg:req arguments arg arg If Call Call Call" + }, + { + "library": "matplotlib", + "name": "_resize_sequence", + "source_code": "def _resize_sequence(seq, N):\n num_elements = len(seq)\n if N == num_elements:\n return seq\n elif N < num_elements:\n return seq[:N]\n else:\n return list(itertools.islice(itertools.cycle(seq), N))", + "docstring": "Trim the given sequence to exactly N elements. If there are more elements in the sequence, cut it. If there are less elements in the sequence, repeat them. Implementation detail: We maintain type stability for the output for N len(seq); this was good enough for the present use cases but is not a fixed design decision.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_resize_sequence arg:seq arg:N arguments arg arg Assign Call If Compare Return return:yes If Compare Return return:yes Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "RegularPolyCollection", + "source_code": "class RegularPolyCollection(_CollectionWithSizes):\n _path_generator = mpath.Path.unit_regular_polygon\n _factor = np.pi ** (-1 / 2)\n\n def __init__(self, numsides, *, rotation=0, sizes=(1,), **kwargs):\n super().__init__(**kwargs)\n self.set_sizes(sizes)\n self._numsides = numsides\n self._paths = [self._path_generator(numsides)]\n self._rotation = rotation\n self.set_transform(transforms.IdentityTransform())\n\n def get_numsides(self):\n return self._numsides\n\n def get_rotation(self):\n return self._rotation\n\n @artist.allow_rasterization\n def draw(self, renderer):\n self.set_sizes(self._sizes, self.get_figure(root=True).dpi)\n self._transforms = [transforms.Affine2D(x).rotate(-self._rotation).get_matrix() for x in self._transforms]\n Collection.draw(self, renderer)", + "docstring": "A collection of n-sided regular polygons.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "ClassDef name:RegularPolyCollection Assign Assign FunctionDef name:__init__ arg:self arg:numsides arguments arg arg arg arg arg Call Call Call Assign Assign Call Assign Call Call FunctionDef name:get_numsides arg:self arguments arg Return return:yes FunctionDef name:get_rotation arg:self arguments arg Return return:yes FunctionDef name:draw arg:self arg:renderer arguments arg arg Call Call Assign Call Call Call Call" + }, + { + "library": "numpy", + "name": "polyval2d", + "source_code": "@array_function_dispatch(_polyval2d_dispatcher)\ndef polyval2d(x, y, c):\n return pu._valnd(polyval, c, x, y)", + "docstring": "Evaluate a 2-D polynomial at points (x, y). This function returns the value .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j The parameters and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. 
In either case, either and or their elements must support multiplication and addition both with themselves and with the elements of . If has fewer than two dimensions, ones are implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `xyxycxy`. See Also -------- polyval, polygrid2d, polyval3d, polygrid3d Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = ((1, 2, 3), (4, 5, 6)) >>> P.polyval2d(1, 1, c) 21.0", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polynomial.py", + "ast_data": "FunctionDef name:polyval2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_get_unflat_views_unaligned", + "source_code": "@no_type_check\ndef _get_unflat_views_unaligned(self, tensor: Optional[torch.Tensor]=None) -> Iterator[Tensor]:\n flat_param = self.flat_param\n if tensor is None:\n tensor = flat_param\n views = (_ext_post_unflatten_transform(subtensor.view(shape) if contiguous else subtensor.as_strided(shape, stride), param_extension, self._fsdp_extension) for subtensor, shape, stride, contiguous, param_extension in zip(torch.split(tensor, flat_param._numels, dim=0), flat_param._shapes, flat_param._strides, flat_param._contiguities, flat_param._param_extensions))\n return views", + "docstring": "Return unflattened `tensor`` or unsharded tensor optimizer state.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:_get_unflat_views_unaligned arg:self arg:tensor arguments arg arg Assign If Compare Assign Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "_tx_resource_slug_for_name", + "source_code": "def _tx_resource_slug_for_name(name):\n if name != 'core':\n name = f'contrib-{name}'\n return name", + "docstring": "Return the Transifex resource slug for the given name.", + "type": "function", + "file_path": "django\\scripts\\manage_translations.py", + "ast_data": "FunctionDef name:_tx_resource_slug_for_name arg:name arguments arg If Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_join_dimensions_cached", + "source_code": "@functools.lru_cache(256)\ndef _join_dimensions_cached(expr: Expr) -> Expr:\n assert isinstance(expr, sympy.Add)\n scale = sympy.Wild('scale', exclude=[0], integer=True)\n base = sympy.Wild('base', integer=True)\n divisor = sympy.Wild('divisor', integer=True)\n mod1 = sympy.Wild('modulus', integer=True)\n mod2 = sympy.Wild('modulus2', integer=True)\n for term1 in expr.args:\n m1 = term1.match(scale * ModularIndexing(base, divisor, mod1))\n if m1:\n for term2 in expr.args:\n m2 = term2.match(m1[scale] * m1[mod1] * ModularIndexing(m1[base], m1[divisor] * m1[mod1], mod2))\n if m2 and term1 != term2:\n expr = join_dimensions(expr - term1 - term2 + m1[scale] * ModularIndexing(m1[base], m1[divisor], m1[mod1] * m2[mod2]))\n return expr\n for term1 in expr.args:\n m1 = term1.match(scale * ModularIndexing(base, divisor, mod1))\n if m1:\n for term2 in expr.args:\n m2 = term2.match(m1[scale] * m1[mod1] * FloorDiv(m1[base], m1[divisor] * m1[mod1]))\n if m2 is not None:\n expr = join_dimensions(expr - term1 - term2 + m1[scale] * FloorDiv(m1[base], m1[divisor]))\n return expr\n return expr", + "docstring": "ModularIndexing(i0, 1, 32) + 32 * ModularIndexing(i0, 32, 4) becomes ModularIndexing(i0, 1, 
128) ModularIndexing(i0, 1, 32) + 32 * FloorDiv(i0, 32) becomes i0 This type of pattern can come from view operations", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\sizevars.py", + "ast_data": "FunctionDef name:_join_dimensions_cached arg:expr arguments arg Call Assign Call Assign Call Assign Call Assign Call Assign Call For Assign Call Call If For Assign Call Call If BoolOp Compare Assign Call Call Return return:yes For Assign Call Call If For Assign Call Call If Compare Assign Call Call Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_compile_threads", + "source_code": "def get_compile_threads() -> int:\n if config.compile_threads is None:\n config.compile_threads = config.decide_compile_threads()\n return config.compile_threads", + "docstring": "Temporary for internal rollout. Assign config.compile_threads lazily and return it. TODO: remove after rollout.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\async_compile.py", + "ast_data": "FunctionDef name:get_compile_threads arguments If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "add_meta_graph", + "source_code": "def add_meta_graph(self, meta_graph_def, global_step=None):\n if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):\n raise TypeError('meta_graph_def must be type MetaGraphDef, saw type: %s' % type(meta_graph_def))\n meta_graph_bytes = meta_graph_def.SerializeToString()\n event = event_pb2.Event(meta_graph_def=meta_graph_bytes)\n self._add_event(event, global_step)", + "docstring": "Adds a to the event file. The allows running the given graph via . Args: meta_graph_def: A object, often as returned by . global_step: Number. Optional global step counter to record with the graph. Raises: TypeError: If both is not an instance of .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py", + "ast_data": "FunctionDef name:add_meta_graph arg:self arg:meta_graph_def arg:global_step arguments arg arg arg If Call Raise Call Call Assign Call Assign Call Call" + }, + { + "library": "matplotlib", + "name": "toolmanager_connect", + "source_code": "def toolmanager_connect(self, s, func):\n return self._callbacks.connect(s, func)", + "docstring": "Connect event with string *s* to *func*. Parameters ---------- s : str The name of the event. The following events are recognized: - 'tool_message_event' - 'tool_removed_event' - 'tool_added_event' For every tool added a new event is created - 'tool_trigger_TOOLNAME', where TOOLNAME is the id of the tool. func : callable Callback function for the toolmanager event with signature:: def func(event: ToolEvent) -> Any Returns ------- cid The callback id for the connection. 
This can be used in .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", + "ast_data": "FunctionDef name:toolmanager_connect arg:self arg:s arg:func arguments arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "datum", + "source_code": "@property\ndef datum(self):\n return self.srs['datum']", + "docstring": "Return the datum for this spatial reference.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py", + "ast_data": "FunctionDef name:datum arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "psnr_loss", + "source_code": "def psnr_loss(image: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:\n return -1.0 * metrics.psnr(image, target, max_val)", + "docstring": "Compute the PSNR loss. The loss is computed as follows: .. math:: \\text{loss} = -\\text{psnr(x, y)} See :meth: for details abut PSNR. Args: image: the input image with shape :math:. target : the labels image with shape :math:. max_val: The maximum value in the image tensor. Return: the computed loss as a scalar. Examples: >>> ones = torch.ones(1) >>> psnr_loss(ones, 1.2 * ones, 2.) # 10 * log(4/((1.2-1)**2)) / log(10) tensor(-20.0000)", + "type": "function", + "file_path": "kornia\\kornia\\losses\\psnr.py", + "ast_data": "FunctionDef name:psnr_loss arg:image arg:target arg:max_val arguments arg arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n if isinstance(X, str):\n raise ValueError('Iterable over raw text documents expected, string object received.')\n self._warn_for_unused_params()\n self._validate_ngram_range()\n self._get_hasher().fit(X, y=y)\n return self", + "docstring": "Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : ndarray of shape [n_samples, n_features] Training data. y : Ignored Not used, present for API consistency by convention. 
Returns ------- self : object HashingVectorizer instance.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg If Call Raise Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_postprocess_for_cut", + "source_code": "def _postprocess_for_cut(fac, bins, retbins: bool, original):\n if isinstance(original, ABCSeries):\n fac = original._constructor(fac, index=original.index, name=original.name)\n if not retbins:\n return fac\n if isinstance(bins, Index) and is_numeric_dtype(bins.dtype):\n bins = bins._values\n return (fac, bins)", + "docstring": "handles post processing for the cut method where we combine the index information if the originally passed datatype was a series", + "type": "function", + "file_path": "pandas\\pandas\\core\\reshape\\tile.py", + "ast_data": "FunctionDef name:_postprocess_for_cut arg:fac arg:bins arg:retbins arg:original arguments arg arg arg arg If Call Assign Call If Return return:yes If BoolOp Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "unsqueeze", + "source_code": "@_onnx_symbolic('aten::unsqueeze')\n@symbolic_helper.parse_args('v', 'i')\ndef unsqueeze(g: jit_utils.GraphContext, self, dim):\n if dim < 0:\n rank = symbolic_helper._get_tensor_rank(self)\n if rank is not None:\n warnings.warn('ONNX export unsqueeze with negative axis ' + str(dim) + ' might cause the onnx model to be incorrect. ' + 'Negative axis is not supported in ONNX. ' + 'Axis is converted to ' + str(dim + rank + 1) + ' based on input shape at export time. ' + 'Passing an tensor of different rank in execution will be incorrect.')\n dim = dim + rank + 1\n else:\n return symbolic_helper._unimplemented('unsqueeze', 'negative axis with unknown input rank', self)\n return symbolic_helper._unsqueeze_helper(g, self, axes_i=[dim])", + "docstring": "Implement unsqueezing a pytorch tensor in ONNX by inserting a new dimension at the specified", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py", + "ast_data": "FunctionDef name:unsqueeze arg:g arg:self arg:dim arguments arg arg arg If Compare Assign Call If Compare Call Call Call Assign Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "getdomain", + "source_code": "def getdomain(x):\n [x] = as_series([x], trim=False)\n if x.dtype.char in np.typecodes['Complex']:\n rmin, rmax = (x.real.min(), x.real.max())\n imin, imax = (x.imag.min(), x.imag.max())\n return np.array((complex(rmin, imin), complex(rmax, imax)))\n else:\n return np.array((x.min(), x.max()))", + "docstring": "Return a domain suitable for given abscissae. Find a domain suitable for a polynomial or Chebyshev series defined at the values supplied. Parameters ---------- x : array_like 1-d array of abscissae whose domain will be determined. Returns ------- domain : ndarray 1-d array containing two values. If the inputs are complex, then the two returned points are the lower left and upper right corners of the smallest rectangle (aligned with the axes) in the complex plane containing the points . If the inputs are real, then the two points are the ends of the smallest interval containing the points . 
See Also -------- mapparms, mapdomain Examples -------- >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> points = np.arange(4)**2 - 5; points array([-5, -4, -1, 4]) >>> pu.getdomain(points) array([-5., 4.]) >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle >>> pu.getdomain(c) array([-1.-1.j, 1.+1.j])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polyutils.py", + "ast_data": "FunctionDef name:getdomain arg:x arguments arg Assign Call If Compare Assign Call Call Assign Call Call Return return:yes Call Call Call Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "Shubert04", + "source_code": "class Shubert04(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.global_optimum = [[-0.80032121, -7.08350592]]\n self.fglob = -29.016015\n\n def fun(self, x, *args):\n self.nfev += 1\n j = atleast_2d(arange(1, 6)).T\n y = -j * cos((j + 1) * x + j)\n return sum(sum(y))", + "docstring": "Shubert 4 objective function. This class defines the Shubert 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Shubert04}}(x) = \\left(\\sum_{i=1}^n \\sum_{j=1}^5 -j \\cos ((j+1)x_i + j)\\right) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: (and many others). .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Jamil#135 has wrong global minimum value, and is missing a minus sign before the whole thing.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:Shubert04 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "write", + "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef write(self, writer: FilePath | WriteExcelBuffer | ExcelWriter, sheet_name: str='Sheet1', startrow: int=0, startcol: int=0, freeze_panes: tuple[int, int] | None=None, engine: str | None=None, storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None:\n from pandas.io.excel import ExcelWriter\n num_rows, num_cols = self.df.shape\n if num_rows > self.max_rows or num_cols > self.max_cols:\n raise ValueError(f'This sheet is too large! 
Your sheet size is: {num_rows}, {num_cols} Max sheet size is: {self.max_rows}, {self.max_cols}')\n if engine_kwargs is None:\n engine_kwargs = {}\n formatted_cells = self.get_formatted_cells()\n if isinstance(writer, ExcelWriter):\n need_save = False\n else:\n writer = ExcelWriter(writer, engine=engine, storage_options=storage_options, engine_kwargs=engine_kwargs)\n need_save = True\n try:\n writer._write_cells(formatted_cells, sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes)\n finally:\n if need_save:\n writer.close()", + "docstring": "writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame freeze_panes : tuple of integer (length 2), default None Specifies the one-based bottommost row and rightmost column that is to be frozen engine : string, default None write engine to use if writer is a path - you can also set this via the options ``. {storage_options} engine_kwargs: dict, optional Arbitrary keyword arguments passed to excel engine.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\excel.py", + "ast_data": "FunctionDef name:write arg:self arg:writer arg:sheet_name arg:startrow arg:startcol arg:freeze_panes arg:engine arg:storage_options arg:engine_kwargs arguments arg arg arg arg arg arg arg arg arg Assign If BoolOp Compare Compare Raise Call If Compare Assign Assign Call If Call Assign Assign Call Assign Try Call If Call Call" + }, + { + "library": "numpy", + "name": "_iszip", + "source_code": "def _iszip(self, filename):\n fname, ext = os.path.splitext(filename)\n return ext in _file_openers.keys()", + "docstring": "Test if the filename is a zip file by looking at the file extension.", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_datasource.py", + "ast_data": "FunctionDef name:_iszip arg:self arg:filename arguments arg arg Assign Call Return return:yes Compare Call" + }, + { + "library": "sphinx", + "name": "tabular_col_spec", + "source_code": "class tabular_col_spec(nodes.Element):\n pass", + "docstring": "Node for specifying tabular columns, used for LaTeX output.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:tabular_col_spec" + }, + { + "library": "numpy", + "name": "ArithOp", + "source_code": "class ArithOp(Enum):\n POS = 1\n NEG = 2\n ADD = 3\n SUB = 4\n MUL = 5\n DIV = 6\n POW = 7", + "docstring": "Used in Op.APPLY expression to specify the function part.", + "type": "class", + "file_path": "numpy\\numpy\\f2py\\symbolic.py", + "ast_data": "ClassDef name:ArithOp Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "scrapy", + "name": "enqueue_request", + "source_code": "@abstractmethod\ndef enqueue_request(self, request: Request) -> bool:\n raise NotImplementedError", + "docstring": "Process a request received by the engine. 
Return `` when the request is rejected by the dupefilter.", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\scheduler.py", + "ast_data": "FunctionDef name:enqueue_request arg:self arg:request arguments arg arg Raise" + }, + { + "library": "django", + "name": "wrap_database_errors", + "source_code": "@cached_property\ndef wrap_database_errors(self):\n return DatabaseErrorWrapper(self)", + "docstring": "Context manager and decorator that re-throws backend-specific database exceptions using Django's common wrappers.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:wrap_database_errors arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "process_value", + "source_code": "@staticmethod\ndef process_value(value):\n is_scalar = not np.iterable(value)\n if is_scalar:\n value = [value]\n dtype = np.min_scalar_type(value)\n if np.issubdtype(dtype, np.integer) or dtype.type is np.bool_:\n dtype = np.promote_types(dtype, np.float32)\n mask = np.ma.getmask(value)\n data = np.asarray(value)\n result = np.ma.array(data, mask=mask, dtype=dtype, copy=True)\n return (result, is_scalar)", + "docstring": "Homogenize the input *value* for easy and efficient normalization. *value* can be a scalar or sequence. Parameters ---------- value Data to normalize. Returns ------- result : masked array Masked array with the same shape as *value*. is_scalar : bool Whether *value* is a scalar. Notes ----- Float dtypes are preserved; integer types with two bytes or smaller are converted to np.float32, and larger types are converted to np.float64. Preserving float32 when possible, and using in-place operations, greatly improves speed for large arrays.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:process_value arg:value arguments arg Assign Call If Assign Assign Call If BoolOp Call Compare Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "async_only_middleware", + "source_code": "def async_only_middleware(func):\n func.sync_capable = False\n func.async_capable = True\n return func", + "docstring": "Mark a middleware factory as returning an async middleware.", + "type": "function", + "file_path": "django\\django\\utils\\decorators.py", + "ast_data": "FunctionDef name:async_only_middleware arg:func arguments arg Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "load", + "source_code": "def load(name, sources: Union[str, list[str]], extra_cflags=None, extra_cuda_cflags=None, extra_sycl_cflags=None, extra_ldflags=None, extra_include_paths=None, build_directory=None, verbose=False, with_cuda: Optional[bool]=None, with_sycl: Optional[bool]=None, is_python_module=True, is_standalone=False, keep_intermediates=True):\n return _jit_compile(name, [sources] if isinstance(sources, str) else sources, extra_cflags, extra_cuda_cflags, extra_sycl_cflags, extra_ldflags, extra_include_paths, build_directory or _get_build_directory(name, verbose), verbose, with_cuda, with_sycl, is_python_module, is_standalone, keep_intermediates=keep_intermediates)", + "docstring": "Load a PyTorch C++ extension just-in-time (JIT). To load an extension, a Ninja build file is emitted, which is used to compile the given sources into a dynamic library. This library is subsequently loaded into the current Python process as a module and returned from this function, ready for use. 
By default, the directory to which the build file is emitted and the resulting library compiled to is ` the name of the extension. This location can be overridden in two ways. First, if the `True`True``. Return the path to the executable. (On Windows, TORCH_LIB_PATH is added to the PATH environment variable as a side effect.) Example: >>> # xdoctest: +SKIP >>> from torch.utils.cpp_extension import load >>> module = load( ... name='extension', ... sources=['extension.cpp', 'extension_kernel.cu'], ... extra_cflags=['-O2'], ... verbose=True)", + "type": "function", + "file_path": "pytorch\\torch\\utils\\cpp_extension.py", + "ast_data": "FunctionDef name:load arg:name arg:sources arg:extra_cflags arg:extra_cuda_cflags arg:extra_sycl_cflags arg:extra_ldflags arg:extra_include_paths arg:build_directory arg:verbose arg:with_cuda arg:with_sycl arg:is_python_module arg:is_standalone arg:keep_intermediates arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Return return:yes Call Call BoolOp Call" + }, + { + "library": "pytorch", + "name": "_compute_nparams_toprune", + "source_code": "def _compute_nparams_toprune(amount, tensor_size):\n if isinstance(amount, numbers.Integral):\n return amount\n else:\n return round(amount * tensor_size)", + "docstring": "Convert the pruning amount from a percentage to absolute value. Since amount can be expressed either in absolute value or as a percentage of the number of units/channels in a tensor, this utility function converts the percentage to absolute value to standardize the handling of pruning. Args: amount (int or float): quantity of parameters to prune. If float, should be between 0.0 and 1.0 and represent the fraction of parameters to prune. If int, it represents the absolute number of parameters to prune. tensor_size (int): absolute number of parameters in the tensor to prune. Returns: int: the number of units to prune in the tensor", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:_compute_nparams_toprune arg:amount arg:tensor_size arguments arg arg If Call Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_register_device_module", + "source_code": "def _register_device_module(device_type, module):\n device_type = torch.device(device_type).type\n m = sys.modules[__name__]\n if hasattr(m, device_type):\n raise RuntimeError(f\"The runtime module of '{device_type}' has already been registered with '{getattr(m, device_type)}'\")\n setattr(m, device_type, module)\n torch_module_name = '.'.join([__name__, device_type])\n sys.modules[torch_module_name] = module", + "docstring": "Register an external runtime module of the specific :attr: supported by torch. 
After the :attr: is registered correctly, the user can refer the external runtime module as part of torch with attribute torch.xxx.", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:_register_device_module arg:device_type arg:module arguments arg arg Assign Call Assign If Call Raise Call Call Call Assign Call Assign" + }, + { + "library": "pytorch", + "name": "decode_exception_table_varint", + "source_code": "def decode_exception_table_varint(bytes_iter: Iterator[int]) -> int:\n b = next(bytes_iter)\n val = b & 63\n while b & 64:\n val <<= 6\n b = next(bytes_iter)\n val |= b & 63\n return val", + "docstring": "Inverse of .", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:decode_exception_table_varint arg:bytes_iter arguments arg Assign Call Assign While Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_c_function", + "source_code": "def get_c_function(self, name):\n self.ensure_initialized()\n return c_api_util.ScopedTFFunction(pywrap_tfe.TFE_ContextGetFunction(self._handle, name), name)", + "docstring": "Get a C API TF_Function from the context. Args: name: Name of the function to get. Returns: A ScopedTFFunction wrapping the C API TF_Function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:get_c_function arg:self arg:name arguments arg arg Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, nbins=None, **kwargs):\n if nbins is not None:\n kwargs['nbins'] = nbins\n self.set_params(**{**self.default_params, **kwargs})", + "docstring": "Parameters ---------- nbins : int or 'auto', default: 10 Maximum number of intervals; one less than max number of ticks. If the string 'auto', the number of bins will be automatically determined based on the length of the axis. steps : array-like, optional Sequence of acceptable tick multiples, starting with 1 and ending with 10. For example, if `axes.autolimit_mode` is 'round_numbers'). Removing such ticks is mostly useful for stacked or ganged plots, where the upper tick of an Axes overlaps with the lower tick of the axes above it. min_n_ticks : int, default: 2 Relax *nbins* and *integer* constraints if necessary to obtain this minimum number of ticks.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:nbins arguments arg arg arg If Compare Assign Call" + }, + { + "library": "matplotlib", + "name": "to_dict", + "source_code": "def to_dict(self):\n return self.__dict__.copy()", + "docstring": "Return a copy of the subplot parameters as a dict.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", + "ast_data": "FunctionDef name:to_dict arg:self arguments arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "note_object", + "source_code": "def note_object(self, objtype: str, name: str, labelid: str, location: Any=None) -> None:\n if (objtype, name) in self.objects:\n docname = self.objects[objtype, name][0]\n logger.warning(__('duplicate %s description of %s, other instance in %s'), objtype, name, docname, location=location)\n self.objects[objtype, name] = (self.env.docname, labelid)", + "docstring": "Note a generic object for cross reference. .. 
versionadded:: 3.0", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py", + "ast_data": "FunctionDef name:note_object arg:self arg:objtype arg:name arg:labelid arg:location arguments arg arg arg arg arg If Compare Assign Call Call Assign" + }, + { + "library": "tensorflow", + "name": "should_save_summary", + "source_code": "@property\ndef should_save_summary(self):\n raise NotImplementedError('must be implemented in descendants')", + "docstring": "Whether saving summaries is needed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:should_save_summary arg:self arguments arg Raise Call" + }, + { + "library": "matplotlib", + "name": "summer", + "source_code": "def summer() -> None:\n set_cmap('summer')", + "docstring": "Set the colormap to 'summer'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:summer arguments Call" + }, + { + "library": "pytorch", + "name": "UnspecializedBuiltinNNModuleVariable", + "source_code": "class UnspecializedBuiltinNNModuleVariable(UnspecializedNNModuleVariable):\n\n def _wrap_source(self, attr_source):\n if not isinstance(attr_source, UnspecializedBuiltinNNModuleSource):\n return UnspecializedBuiltinNNModuleSource(attr_source)\n return attr_source", + "docstring": "Differentiates between builtin nn modules (e.g. torch.nn.Linear) and user defined nn modules.", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\nn_module.py", + "ast_data": "ClassDef name:UnspecializedBuiltinNNModuleVariable FunctionDef name:_wrap_source arg:self arg:attr_source arguments arg arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_xy", + "source_code": "def set_xy(self, xy):\n xy = np.asarray(xy)\n nverts, _ = xy.shape\n if self._closed:\n if nverts == 1 or (nverts > 1 and (xy[0] != xy[-1]).any()):\n xy = np.concatenate([xy, [xy[0]]])\n elif nverts > 2 and (xy[0] == xy[-1]).all():\n xy = xy[:-1]\n self._path = Path(xy, closed=self._closed)\n self.stale = True", + "docstring": "Set the vertices of the polygon. Parameters ---------- xy : (N, 2) array-like The coordinates of the vertices. Notes ----- Unlike , we do not ignore the last input vertex. If the polygon is meant to be closed, and the last point of the polygon is not equal to the first, we assume that the user has not explicitly passed a `` vertex, and add it ourselves.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_xy arg:self arg:xy arguments arg arg Assign Call Assign If If BoolOp Compare BoolOp Compare Call Compare Assign Call If BoolOp Compare Call Compare Assign Assign Call Assign" + }, + { + "library": "pytorch", + "name": "FeatureAlphaDropout", + "source_code": "class FeatureAlphaDropout(_DropoutNd):\n\n def forward(self, input: Tensor) -> Tensor:\n return F.feature_alpha_dropout(input, self.p, self.training)", + "docstring": "Randomly masks out entire channels. A channel is a feature map, e.g. the :math:-th channel of the :math:-th sample in the batch input is a tensor :math: of the input tensor). Instead of setting activations to zero, as in regular Dropout, the activations are set to the negative saturation value of the SELU activation function. 
More details can be found in the paper _ . Each element will be masked independently for each sample on every forward call with probability :attr: using samples from a Bernoulli distribution. The elements to be masked are randomized on every forward call, and scaled and shifted to maintain zero mean and unit variance. Usually the input comes from :class: modules. As described in the paper _ , if adjacent pixels within feature maps are strongly correlated (as is normally the case in early convolution layers) then i.i.d. dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, :func: will help promote independence between feature maps and should be used instead. Args: p (float, optional): probability of an element to be zeroed. Default: 0.5 inplace (bool, optional): If set to `(N, C, D, H, W)(C, D, H, W)(N, C, D, H, W)(C, D, H, W)` (same shape as input). Examples:: >>> m = nn.FeatureAlphaDropout(p=0.2) >>> input = torch.randn(20, 16, 4, 32, 32) >>> output = m(input) .. _Self-Normalizing Neural Networks: .. _Efficient Object Localization Using Convolutional Networks:", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\dropout.py", + "ast_data": "ClassDef name:FeatureAlphaDropout FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "SELU", + "source_code": "class SELU(Module):\n __constants__ = ['inplace']\n inplace: bool\n\n def __init__(self, inplace: bool=False) -> None:\n super().__init__()\n self.inplace = inplace\n\n def forward(self, input: Tensor) -> Tensor:\n return F.selu(input, self.inplace)\n\n def extra_repr(self) -> str:\n inplace_str = 'inplace=True' if self.inplace else ''\n return inplace_str", + "docstring": "Applies the SELU function element-wise. .. math:: \\text{SELU}(x) = \\text{scale} * (\\max(0,x) + \\min(0, \\alpha * (\\exp(x) - 1))) with :math: and :math:. .. warning:: When using `Self-Normalizing Neural Networkstorch.nn.init.calculate_gainSelf-Normalizing Neural Networks(*)*(*)`, same shape as the input. .. image:: ../scripts/activation_images/SELU.png Examples:: >>> m = nn.SELU() >>> input = torch.randn(2) >>> output = m(input) .. _Self-Normalizing Neural Networks:", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:SELU Assign FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "register_module_backward_hook", + "source_code": "def register_module_backward_hook(hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]) -> RemovableHandle:\n global _global_is_full_backward_hook\n if _global_is_full_backward_hook is True:\n raise RuntimeError('Cannot use both regular backward hooks and full backward hooks as a global Module hook. Please use only one of them.')\n _global_is_full_backward_hook = False\n handle = RemovableHandle(_global_backward_hooks)\n _global_backward_hooks[handle.id] = hook\n return handle", + "docstring": "Register a backward hook common to all the modules. This function is deprecated in favor of :func: and the behavior of this function will change in future versions. 
Returns: :class:: a handle that can be used to remove the added hook by calling ``", + "type": "function", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:register_module_backward_hook arg:hook arguments arg If Compare Raise Call Assign Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "convert_variable_to_constant", + "source_code": "def convert_variable_to_constant(self, incoming_edge, tensor_data):\n index = incoming_edge.destination.index\n for edge in self.outgoing_edges:\n if edge.source.index == index:\n edge.destination.convertible.convert_variable_to_constant(edge, tensor_data)\n function = self.converted_self().function\n function.signature.input_arg[index].type = tensor_data.dtype\n if '_input_shapes' in function.attr:\n function.attr['_input_shapes'].list.shape[index].unknown_rank = True\n del function.attr['_input_shapes'].list.shape[index].dim[:]\n arg_attrs = function.arg_attr[index].attr\n if '_output_shapes' in arg_attrs:\n arg_attrs['_output_shapes'].list.shape[0].unknown_rank = True\n del arg_attrs['_output_shapes'].list.shape[0].dim[:]", + "docstring": "Converts one function argument into a constant. Args: incoming_edge: The edge into the argument to be converted. tensor_data: The constant value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Assign For If Compare Call Assign Call Assign If Compare Assign Assign If Compare Assign" + }, + { + "library": "pytorch", + "name": "can_use_cudnn_attention", + "source_code": "def can_use_cudnn_attention(params: SDPAParams, debug: bool=False) -> bool:\n return torch._C._can_use_cudnn_attention(params, debug)", + "docstring": "Check if cudnn_attention can be utilized in scaled_dot_product_attention. Args: params: An instance of SDPAParams containing the tensors for query, key, value, an optional attention mask, dropout rate, and a flag indicating if the attention is causal. debug: Whether to logging.warn with information as to why cuDNN attention could not be run. Defaults to False. Returns: True if cuDNN can be used with the given parameters; otherwise, False. Note: This function is dependent on a CUDA-enabled build of PyTorch. It will return False in non-CUDA environments.", + "type": "function", + "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py", + "ast_data": "FunctionDef name:can_use_cudnn_attention arg:params arg:debug arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "local_offsets", + "source_code": "def local_offsets(self) -> list[torch.Size]:\n return [chunk.offsets for chunk in self._storage_meta.chunks]", + "docstring": "Returns a list of :class:`torch.Size' corresponding to the local offsets for the shards on this rank. 
Returns an empty list if the current rank does not host any shards for this Tensor.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py", + "ast_data": "FunctionDef name:local_offsets arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_any_version_depends_on_gradient", + "source_code": "def _any_version_depends_on_gradient(self) -> set[int]:\n depends_on_gradient: set[int] = set()\n while True:\n start_size = len(depends_on_gradient)\n for node in self._data_flow_graph.flow_nodes:\n ids = tuple((key.id for key, (_, version) in node.inputs.items() if self._categories.get(key, version) in (Category.GRADIENT, Category.PARAMETER) or key.id in depends_on_gradient))\n if ids:\n depends_on_gradient.update(ids)\n depends_on_gradient.update((key.id for key in node.outputs))\n if len(depends_on_gradient) == start_size:\n return depends_on_gradient", + "docstring": "Extract IDs of Tensors which depend or will depend on a gradient. Note that this weakened definition of \"depends\" requires us to loop over the data flow graph multiple times because it allows dependency information to flow backward through edges and removes the guarantee that nodes are topologically sorted. (Or indeed, even that a valid topological order exists.) Put another way, we have converted an acyclic data flow graph into a cyclic graph and we are attempting to partition cycles involving a gradient from the rest of the graph.", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py", + "ast_data": "FunctionDef name:_any_version_depends_on_gradient arg:self arguments arg Call While Assign Call For Assign Call Call BoolOp Compare Call Compare If Call Call If Compare Call Return return:yes" + }, + { + "library": "pandas", + "name": "_require_listlike", + "source_code": "def _require_listlike(level, arr, arrname: str):\n if level is not None and (not is_list_like(level)):\n if not is_list_like(arr):\n raise TypeError(f'{arrname} must be list-like')\n if len(arr) > 0 and is_list_like(arr[0]):\n raise TypeError(f'{arrname} must be list-like')\n level = [level]\n arr = [arr]\n elif level is None or is_list_like(level):\n if not is_list_like(arr) or not is_list_like(arr[0]):\n raise TypeError(f'{arrname} must be list of lists-like')\n return (level, arr)", + "docstring": "Ensure that level is either None or listlike, and arr is list-of-listlike.", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:_require_listlike arg:level arg:arr arg:arrname arguments arg arg arg If BoolOp Compare Call If Call Raise Call If BoolOp Compare Call Call Raise Call Assign Assign If BoolOp Compare Call If BoolOp Call Call Raise Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n validate_data(self, X, accept_sparse='csr')\n return self", + "docstring": "Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. y : None Ignored. 
Returns ------- self : object Fitted transformer.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_var", + "source_code": "def _var(self, dim, df, scale):\n if df > dim + 3:\n var = (df - dim + 1) * scale ** 2\n diag = scale.diagonal()\n var += (df - dim - 1) * np.outer(diag, diag)\n var /= (df - dim) * (df - dim - 1) ** 2 * (df - dim - 3)\n else:\n var = None\n return var", + "docstring": "Variance of the inverse Wishart distribution. Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_var arg:self arg:dim arg:df arg:scale arguments arg arg arg arg If Compare Assign Assign Call Call Assign Return return:yes" + }, + { + "library": "django", + "name": "is_password_usable", + "source_code": "def is_password_usable(encoded):\n return encoded is None or not encoded.startswith(UNUSABLE_PASSWORD_PREFIX)", + "docstring": "Return True if this password wasn't generated by User.set_unusable_password(), i.e. make_password(None).", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\hashers.py", + "ast_data": "FunctionDef name:is_password_usable arg:encoded arguments arg Return return:yes BoolOp Compare Call" + }, + { + "library": "scipy", + "name": "aps07_f", + "source_code": "def aps07_f(x, n):\n return (1 + (1 - n) ** 2) * x - (1 - n * x) ** 2", + "docstring": "Upside down parabola with parametrizable height", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:aps07_f arg:x arg:n arguments arg arg Return return:yes" + }, + { + "library": "kornia", + "name": "matrix", + "source_code": "def matrix(self) -> Tensor:\n return quaternion_to_rotation_matrix(self.data)", + "docstring": "Convert the quaternion to a rotation matrix of shape :math:. 
Example: >>> q = Quaternion.identity() >>> m = q.matrix() >>> m tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], grad_fn=)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\quaternion.py", + "ast_data": "FunctionDef name:matrix arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_permutation_distribution_t", + "source_code": "def _permutation_distribution_t(data, permutations, size_a, equal_var, random_state=None):\n random_state = check_random_state(random_state)\n size = data.shape[-1]\n n_max = special.comb(size, size_a)\n if permutations < n_max:\n perm_generator = (random_state.permutation(size) for i in range(permutations))\n else:\n permutations = n_max\n perm_generator = (np.concatenate(z) for z in _all_partitions(size_a, size - size_a))\n t_stat = []\n for indices in _batch_generator(perm_generator, batch=50):\n indices = np.array(indices)\n data_perm = data[..., indices]\n data_perm = np.moveaxis(data_perm, -2, 0)\n a = data_perm[..., :size_a]\n b = data_perm[..., size_a:]\n t_stat.append(_calc_t_stat(a, b, equal_var))\n t_stat = np.concatenate(t_stat, axis=0)\n return (t_stat, permutations, n_max)", + "docstring": "Generation permutation distribution of t statistic", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:_permutation_distribution_t arg:data arg:permutations arg:size_a arg:equal_var arg:random_state arguments arg arg arg arg arg Assign Call Assign Assign Call If Compare Assign Call Call Assign Assign Call Call Assign For Call Assign Call Assign Assign Call Assign Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "xw_plus_b_v1", + "source_code": "def xw_plus_b_v1(x, weights, biases, name=None):\n with ops.name_scope(name, 'xw_plus_b_v1', [x, weights, biases]) as name:\n x = ops.convert_to_tensor(x, name='x')\n weights = ops.convert_to_tensor(weights, name='weights')\n biases = ops.convert_to_tensor(biases, name='biases')\n mm = math_ops.matmul(x, weights)\n return bias_add_v1(mm, biases, name=name)", + "docstring": "Computes matmul(x, weights) + biases. This is a deprecated version of that will soon be removed. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified \"xw_plus_b_v1\" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. 
Dimensions typically: batch, out_units.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:xw_plus_b_v1 arg:x arg:weights arg:biases arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "initialize", + "source_code": "@tf_export(v1=['summary.initialize'])\ndef initialize(graph=None, session=None):\n if context.executing_eagerly():\n return\n if _summary_state.writer is None:\n raise RuntimeError('No default tf.contrib.summary.SummaryWriter found')\n if session is None:\n session = ops.get_default_session()\n if session is None:\n raise ValueError('Argument `session must be passed if no default session exists')\n session.run(summary_writer_initializer_op())\n if graph is not None:\n data = _serialize_graph(graph)\n x = array_ops.placeholder(dtypes.string)\n session.run(graph_v1(x, 0), feed_dict={x: data})", + "docstring": "Initializes summary writing for graph execution mode. This operation is a no-op when executing eagerly. This helper method provides a higher-level alternative to using and . Most users will also want to call which can happen before or after this function is called. Args: graph: A or to output to the writer. This function will not write the default graph by default. When writing to an event log file, the associated step will be zero. session: So this method can call . This defaults to . Raises: RuntimeError: If the current thread has no default . ValueError: If session wasn't passed and no default session.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:initialize arg:graph arg:session arguments arg arg If Call Return return:no If Compare Raise Call If Compare Assign Call If Compare Raise Call Call Call If Compare Assign Call Assign Call Call Call Call" + }, + { + "library": "scipy", + "name": "pdf", + "source_code": "def pdf(self, x, s2, mu=0, lmbda=1, a=1, b=1):\n invalid, args = self._process_parameters_pdf(x, s2, mu, lmbda, a, b)\n s2 = args[1]\n with np.errstate(all='ignore'):\n pdf = np.asarray(self._pdf(*args))\n pdf[s2 <= 0] = 0\n pdf[invalid] = np.nan\n return pdf[()]", + "docstring": "The probability density function. Parameters ---------- x, s2 : array_like Arguments. must be greater than zero. mu, lmbda, a, b : array_like, optional Shape parameters. , , and must be greater than zero. Returns ------- logpdf : ndarray or scalar The probability density function.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:pdf arg:self arg:x arg:s2 arg:mu arg:lmbda arg:a arg:b arguments arg arg arg arg arg arg arg Assign Call Assign With Call Assign Call Call Assign Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "decorated", + "source_code": "def decorated(metric_obj, *args, **kwargs):\n strategy = distribute_lib.get_strategy()\n for weight in metric_obj.weights:\n if backend.is_tpu_strategy(strategy) and (not strategy.extended.variable_created_in_scope(weight)) and (not distribute_lib.in_cross_replica_context()):\n raise ValueError('Trying to run metric.update_state in replica context when the metric was not created in TPUStrategy scope. Make sure the keras Metric is created in TPUstrategy scope. 
')\n with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):\n update_op = update_state_fn(*args, **kwargs)\n if update_op is not None:\n metric_obj.add_update(update_op)\n return update_op", + "docstring": "Decorated function with .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py", + "ast_data": "FunctionDef name:decorated arg:metric_obj arguments arg arg arg Assign Call For If BoolOp Call Call Call Raise Call With Call Assign Call If Compare Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "default_status", + "source_code": "@classproperty\ndef default_status(cls):\n return 303 if cherrypy.serving.request.protocol >= (1, 1) else 302", + "docstring": "Redirect status for the request. This is the default handler. RFC 2616 indicates a 301 response code fits our goal; however, browser support for 301 is quite messy. Use 302/303 instead. See", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cperror.py", + "ast_data": "FunctionDef name:default_status arg:cls arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "_calculate_replicas_with_values", + "source_code": "def _calculate_replicas_with_values(strategy, input_workers, optional_list):\n worker_has_values = []\n for worker, optionals in zip(input_workers.worker_devices, optional_list):\n with ops.device(worker):\n device_has_values = [math_ops.cast(v.has_value(), dtypes.int64) for v in optionals]\n worker_has_values.append(math_ops.reduce_sum(device_has_values, keepdims=True))\n client_has_values = math_ops.reduce_sum(worker_has_values, keepdims=True)\n if strategy.extended._in_multi_worker_mode():\n global_has_values = strategy.reduce(reduce_util.ReduceOp.SUM, client_has_values, axis=None)\n return array_ops.reshape(global_has_values, [])\n else:\n return array_ops.reshape(client_has_values, [])", + "docstring": "Computes the number of replicas that have values. Args: strategy: the . input_workers: the . optional_list: a list of lists . The values from each compute device grouped by the input device. 
Returns: A scalar Tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "FunctionDef name:_calculate_replicas_with_values arg:strategy arg:input_workers arg:optional_list arguments arg arg arg Assign For Call With Call Assign Call Call Call Call Assign Call If Call Assign Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "kl_div", + "source_code": "def kl_div(input: Tensor, target: Tensor, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean', log_target: bool=False) -> Tensor:\n if has_torch_function_variadic(input, target):\n return handle_torch_function(kl_div, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction, log_target=log_target)\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n if reduction == 'mean':\n warnings.warn(\"reduction: 'mean' divides the total loss by both the batch size and the support size.'batchmean' divides only by the batch size, and aligns with the KL div math definition.'mean' will be changed to behave the same as 'batchmean' in the next major release.\")\n if reduction == 'batchmean':\n reduction_enum = _Reduction.get_enum('sum')\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n reduced = torch.kl_div(input, target, reduction_enum, log_target=log_target)\n if reduction == 'batchmean' and input.dim() != 0:\n reduced = reduced / input.size()[0]\n return reduced", + "docstring": "Compute the KL Divergence loss. Refer - The __ See :class: for details. Args: input: Tensor of arbitrary shape in log-probabilities. target: Tensor of the same shape as input. See :attr: for the target's interpretation. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). 
reduction (str, optional): Specifies the reduction to apply to the output: `size_averagereducereductionreductionreduction` which aligns with KL math definition.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:kl_div arg:input arg:target arg:size_average arg:reduce arg:reduction arg:log_target arguments arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call If Compare Call If Compare Assign Call Assign Call Assign Call If BoolOp Compare Compare Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_graph_network", + "source_code": "def _is_graph_network(layer):\n if isinstance(layer, RevivedNetwork):\n return False\n elif isinstance(layer, functional_lib.Functional):\n return layer._is_graph_network or isinstance(layer, models_lib.Sequential)\n return False", + "docstring": "Determines whether the layer is a graph network.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", + "ast_data": "FunctionDef name:_is_graph_network arg:layer arguments arg If Call Return return:yes If Call Return return:yes BoolOp Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_has_fsdp_params", + "source_code": "@no_type_check\ndef _has_fsdp_params(state: _FSDPState, module: nn.Module) -> bool:\n return _module_handle(state, module) is not None", + "docstring": "Returns if `` has parameters managed by FSDP.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py", + "ast_data": "FunctionDef name:_has_fsdp_params arg:state arg:module arguments arg arg Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "variable_dtype", + "source_code": "@property\ndef variable_dtype(self):\n return self.dtype", + "docstring": "Alias of , the dtype of the weights.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:variable_dtype arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "tick_top", + "source_code": "def tick_top(self):\n label = True\n if 'label1On' in self._major_tick_kw:\n label = self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']\n self.set_ticks_position('top')\n self.set_tick_params(which='both', labeltop=label)", + "docstring": "Move ticks and ticklabels (if present) to the top of the Axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:tick_top arg:self arguments arg Assign If Compare Assign BoolOp Call Call" + }, + { + "library": "scipy", + "name": "__call__", + "source_code": "def __call__(self, times, order=0):\n if order not in [0, 1, 2]:\n raise ValueError('`order` must be 0, 1 or 2.')\n times = np.asarray(times, dtype=float)\n if times.ndim > 1:\n raise ValueError('`times` must be at most 1-dimensional.')\n singe_time = times.ndim == 0\n times = np.atleast_1d(times)\n rotvecs = self.interpolator(times)\n if order == 0:\n index = np.searchsorted(self.times, times, side='right')\n index -= 1\n index[index < 0] = 0\n n_segments = len(self.times) - 1\n index[index > n_segments - 1] = n_segments - 1\n result = self.rotations[index] * Rotation.from_rotvec(rotvecs)\n elif order == 1:\n rotvecs_dot = self.interpolator(times, 1)\n result = _compute_angular_rate(rotvecs, rotvecs_dot)\n elif order == 2:\n rotvecs_dot = self.interpolator(times, 1)\n rotvecs_dot_dot 
= self.interpolator(times, 2)\n result = _compute_angular_acceleration(rotvecs, rotvecs_dot, rotvecs_dot_dot)\n else:\n assert False\n if singe_time:\n result = result[0]\n return result", + "docstring": "Compute interpolated values. Parameters ---------- times : float or array_like Times of interest. order : {0, 1, 2}, optional Order of differentiation: * 0 (default) : return Rotation * 1 : return the angular rate in rad/sec * 2 : return the angular acceleration in rad/sec/sec Returns ------- Interpolated Rotation, angular rate or acceleration.", + "type": "method", + "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:times arg:order arguments arg arg arg If Compare Raise Call Assign Call If Compare Raise Call Assign Compare Assign Call Assign Call If Compare Assign Call Assign Compare Assign Call Assign Compare Assign Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call Assign Call If Assign Return return:yes" + }, + { + "library": "pandas", + "name": "next_workday", + "source_code": "def next_workday(dt: datetime) -> datetime:\n dt += timedelta(days=1)\n while dt.weekday() > 4:\n dt += timedelta(days=1)\n return dt", + "docstring": "returns next workday used for observances", + "type": "function", + "file_path": "pandas\\pandas\\tseries\\holiday.py", + "ast_data": "FunctionDef name:next_workday arg:dt arguments arg Call While Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_LowerCholesky", + "source_code": "class _LowerCholesky(Constraint):\n event_dim = 2\n\n def check(self, value):\n value_tril = value.tril()\n lower_triangular = (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]\n positive_diagonal = (value.diagonal(dim1=-2, dim2=-1) > 0).min(-1)[0]\n return lower_triangular & positive_diagonal", + "docstring": "Constrain to lower-triangular square matrices with positive diagonals.", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:_LowerCholesky Assign FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Assign Call Call Compare Assign Call Compare Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_lineoffset", + "source_code": "def set_lineoffset(self, lineoffset):\n if lineoffset == self.get_lineoffset():\n return\n linelength = self.get_linelength()\n segments = self.get_segments()\n pos = 1 if self.is_horizontal() else 0\n for segment in segments:\n segment[0, pos] = lineoffset + linelength / 2.0\n segment[1, pos] = lineoffset - linelength / 2.0\n self.set_segments(segments)\n self._lineoffset = lineoffset", + "docstring": "Set the offset of the lines used to mark each event.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:set_lineoffset arg:self arg:lineoffset arguments arg arg If Compare Call Return return:no Assign Call Assign Call Assign Call For Assign Assign Call Assign" + }, + { + "library": "pytorch", + "name": "mm_options", + "source_code": "def mm_options(config, sym_m, sym_n, sym_k, layout):\n even_k_symbolic = sympy.gcd(sym_k, config.kwargs['BLOCK_K']) == config.kwargs['BLOCK_K']\n allow_tf32 = torch.backends.cuda.matmul.allow_tf32 and (not inductor_config.force_same_precision or (sym_m % 16 == 0 and sym_n % 16 == 0 and (sym_k % 8 == 0)))\n options_dict = dict(EVEN_K=even_k_symbolic, ALLOW_TF32=allow_tf32, USE_FAST_ACCUM=False, 
ACC_TYPE=acc_type(layout.dtype), num_stages=config.num_stages, num_warps=config.num_warps, **config.kwargs)\n if 'GROUP_M' not in config.kwargs:\n group_m = config.kwargs.get('GROUP_M', 8)\n options_dict['GROUP_M'] = group_m\n return options_dict", + "docstring": "Common options to matmul triton templates.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\kernel\\mm_common.py", + "ast_data": "FunctionDef name:mm_options arg:config arg:sym_m arg:sym_n arg:sym_k arg:layout arguments arg arg arg arg arg Assign Compare Call Assign BoolOp BoolOp BoolOp Compare Compare Compare Assign Call Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, t_direction, t, f1, f2, *, where=None, interpolate=False, step=None, **kwargs):\n self.t_direction = t_direction\n self._interpolate = interpolate\n self._step = step\n verts = self._make_verts(t, f1, f2, where)\n super().__init__(verts, **kwargs)", + "docstring": "Parameters ---------- t_direction : {{'x', 'y'}} The axes on which the variable lies. - 'x': the curves are `.PolyCollection`. See Also -------- .Axes.fill_between, .Axes.fill_betweenx", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:t_direction arg:t arg:f1 arg:f2 arguments arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Call Call Call" + }, + { + "library": "matplotlib", + "name": "add_tools_to_manager", + "source_code": "def add_tools_to_manager(toolmanager, tools=default_tools):\n for name, tool in tools.items():\n toolmanager.add_tool(name, tool)", + "docstring": "Add multiple tools to a . Parameters ---------- toolmanager : Manager to which the tools are added. 
tools : {str: class_like}, optional The tools to add in a {name: tool} dict, see for more info.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:add_tools_to_manager arg:toolmanager arg:tools arguments arg arg For Call Call" + }, + { + "library": "scipy", + "name": "choose_ncv", + "source_code": "def choose_ncv(k):\n return max(2 * k + 1, 20)", + "docstring": "Choose number of lanczos vectors based on target number of singular/eigen values and vectors to compute, k.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\arpack\\arpack.py", + "ast_data": "FunctionDef name:choose_ncv arg:k arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "AddIndexConcurrently", + "source_code": "class AddIndexConcurrently(NotInTransactionMixin, AddIndex):\n atomic = False\n category = OperationCategory.ADDITION\n\n def describe(self):\n return 'Concurrently create index %s on field(s) %s of model %s' % (self.index.name, ', '.join(self.index.fields), self.model_name)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n self._ensure_not_in_transaction(schema_editor)\n model = to_state.apps.get_model(app_label, self.model_name)\n if self.allow_migrate_model(schema_editor.connection.alias, model):\n schema_editor.add_index(model, self.index, concurrently=True)\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n self._ensure_not_in_transaction(schema_editor)\n model = from_state.apps.get_model(app_label, self.model_name)\n if self.allow_migrate_model(schema_editor.connection.alias, model):\n schema_editor.remove_index(model, self.index, concurrently=True)", + "docstring": "Create an index using PostgreSQL's CREATE INDEX CONCURRENTLY syntax.", + "type": "class", + "file_path": "django\\django\\contrib\\postgres\\operations.py", + "ast_data": "ClassDef name:AddIndexConcurrently Assign Assign FunctionDef name:describe arg:self arguments arg Return return:yes Call FunctionDef name:database_forwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg Call Assign Call If Call Call FunctionDef name:database_backwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg Call Assign Call If Call Call" + }, + { + "library": "pandas", + "name": "_validate_integer", + "source_code": "def _validate_integer(self, key: int | np.integer, axis: AxisInt) -> None:\n len_axis = len(self.obj._get_axis(axis))\n if key >= len_axis or key < -len_axis:\n raise IndexError('single positional indexer is out-of-bounds')", + "docstring": "Check that 'key' is a valid position in the desired axis. Parameters ---------- key : int Requested position. axis : int Desired axis. Raises ------ IndexError If 'key' is not a valid position in axis 'axis'.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:_validate_integer arg:self arg:key arg:axis arguments arg arg arg Assign Call Call If BoolOp Compare Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "to_int64", + "source_code": "@tf_export(v1=['to_int64'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.cast` instead.')\ndef to_int64(x, name='ToInt64'):\n return cast(x, dtypes.int64, name=name)", + "docstring": "Casts a tensor to type . Args: x: A or or . 
name: A name for the operation (optional). Returns: A or or with same shape as with type . Raises: TypeError: If cannot be cast to the . @compatibility(TF2) This name was deprecated and removed in TF2, but has an exact replacement . There are no further issues with eager execution or tf.function. Before: >>> tf.compat.v1.to_int64(tf.constant(1, dtype=tf.int32)) After: >>> tf.cast(tf.constant(1, dtype=tf.int32), tf.int64) @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:to_int64 arg:x arg:name arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_set_attr", + "source_code": "def _set_attr(self, attr_name, attr_value) -> None:\n buf = pywrap_tf_session.TF_NewBufferFromString(compat.as_bytes(attr_value.SerializeToString()))\n try:\n self._set_attr_with_buf(attr_name, buf)\n finally:\n pywrap_tf_session.TF_DeleteBuffer(buf)", + "docstring": "Private method used to set an attribute in the node_def.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_set_attr arg:self arg:attr_name arg:attr_value arguments arg arg arg Assign Call Call Call Try Call Call" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self, x, pos=None):\n return self.func(x, pos)", + "docstring": "Return the value of the user defined function. *x* and *pos* are passed through as-is.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_values", + "source_code": "@property\ndef _values(self):\n return self._mgr.internal_values()", + "docstring": "Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public `` ensures to always return an ExtensionArray. 
Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | --------------------- | Numeric | ndarray | ndarray | NumpyExtensionArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| TimedeltaArray | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA |", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:_values arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "needs_unshard", + "source_code": "def needs_unshard(self) -> bool:\n if not self.uses_sharded_strategy:\n return False\n unsharded_flat_param = self._get_padded_unsharded_flat_param()\n already_unsharded = _same_storage_size(unsharded_flat_param, unsharded_flat_param.numel())\n return not already_unsharded", + "docstring": "Return if the handle's flat parameter needs to be unsharded.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:needs_unshard arg:self arguments arg If Return return:yes Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "FakeItemVariable", + "source_code": "class FakeItemVariable(TensorVariable):\n _nonvar_fields = {'need_unwrap', *TensorVariable._nonvar_fields}\n\n def __init__(self, proxy: torch.fx.Proxy, **kwargs) -> None:\n need_unwrap = kwargs.pop('need_unwrap', False)\n super().__init__(proxy, **kwargs)\n self.need_unwrap = need_unwrap\n\n @classmethod\n def from_tensor_variable(cls, tensor_variable):\n return FakeItemVariable(**dict(tensor_variable.__dict__))", + "docstring": "An unspecialized python variable which prevents access to the underlying raw value. 
This is needed if item is called on a FakeTensor.", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\tensor.py", + "ast_data": "ClassDef name:FakeItemVariable Assign FunctionDef name:__init__ arg:self arg:proxy arguments arg arg arg Assign Call Call Call Assign FunctionDef name:from_tensor_variable arg:cls arg:tensor_variable arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "shape", + "source_code": "@property\ndef shape(self):\n return (self._row_splits.shape[0] - 1,) + (None,) + self._values.shape[1:]", + "docstring": "A tuple indicating the shape of this RaggedTensorValue.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_accumulator", + "source_code": "def _get_accumulator(tensor):\n assert isinstance(tensor.graph, func_graph_module.FuncGraph)\n\n def get_func_graph_output(t):\n for output in tensor.graph.outputs:\n if output is t:\n return t\n identity_op = t.consumers()[0]\n if identity_op.type == 'Identity' and any((identity_op.outputs[0] is t for t in tensor.graph.outputs)):\n return identity_op.outputs[0]\n return None\n for consumer in tensor.consumers():\n if consumer.type != 'TensorListPushBack':\n continue\n accum_input_idx = -1\n for accum_input_idx, inp in enumerate(tensor.graph.inputs):\n if inp is consumer.inputs[0]:\n break\n else:\n continue\n output = get_func_graph_output(consumer.outputs[0])\n if output is None:\n continue\n for accum_output_idx, out in enumerate(tensor.graph.outputs):\n if out is output:\n if accum_input_idx == accum_output_idx:\n return output\n break\n return None", + "docstring": "Returns TensorList if any containing accumulated values of tensor. We try to find a pattern of the form: input_tl tensor \\ / (TensorListPushBack) | output_tl which satisfies the following conditions: 1. input_tl must be in tensor.graph.inputs. 2. output_tl or Identity(output_tl) must be in tensor.graph.outputs. 3. tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_t). output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is returned if such a pattern is found else None is returned. Args: tensor: The Tensor to be accumulated. Returns: A variant tensor in the same graph as or None if no accumulator is found.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py", + "ast_data": "FunctionDef name:_get_accumulator arg:tensor arguments arg Call FunctionDef name:get_func_graph_output arg:t arguments arg For If Compare Return return:yes Assign Call If BoolOp Compare Call Compare Return return:yes Return return:no For Call If Compare Assign For Call If Compare Assign Call If Compare For Call If Compare If Compare Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "tanh", + "source_code": "@dispatch.add_dispatch_support\ndef tanh(x):\n return nn.tanh(x)", + "docstring": "Hyperbolic tangent activation function. For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.tanh(a) >>> b.numpy() array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32) Args: x: Input tensor. 
Returns: Tensor of same shape and dtype of input , with tanh activation: .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py", + "ast_data": "FunctionDef name:tanh arg:x arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "irfft2", + "source_code": "@array_function_dispatch(_fftn_dispatcher)\ndef irfft2(a, s=None, axes=(-2, -1), norm=None, out=None):\n return irfftn(a, s, axes, norm, out=None)", + "docstring": "Computes the inverse of . Parameters ---------- a : array_like The input array s : sequence of ints, optional Shape of the real output to the inverse FFT. .. versionchanged:: 2.0 If it is `saxesssaxesnumpy.fftirfft2irfftnirfftn`. Examples -------- >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> A = np.fft.rfft2(a) >>> np.fft.irfft2(A, s=a.shape) array([[0., 0., 0., 0., 0.], [1., 1., 1., 1., 1.], [2., 2., 2., 2., 2.], [3., 3., 3., 3., 3.], [4., 4., 4., 4., 4.]])", + "type": "function", + "file_path": "numpy\\numpy\\fft\\_pocketfft.py", + "ast_data": "FunctionDef name:irfft2 arg:a arg:s arg:axes arg:norm arg:out arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "render", + "source_code": "def render(self, context):\n with context.render_context.push_state(self):\n if context.template is None:\n with context.bind_template(self):\n context.template_name = self.name\n return self._render(context)\n else:\n return self._render(context)", + "docstring": "Display stage -- can be called many times", + "type": "method", + "file_path": "django\\django\\template\\base.py", + "ast_data": "FunctionDef name:render arg:self arg:context arguments arg arg With Call If Compare With Call Assign Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "convolve", + "source_code": "@_ni_docstrings.docfiller\ndef convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0, *, axes=None):\n return _correlate_or_convolve(input, weights, output, mode, cval, origin, True, axes)", + "docstring": "Multidimensional convolution. The array is convolved with the given kernel. Parameters ---------- %(input)s weights : array_like Array of weights, same number of dimensions as input %(output)s %(mode_reflect)s cval : scalar, optional Value to fill past edges of input if is 'constant'. Default is 0.0 origin : int or sequence, optional Controls the placement of the filter on the input array's pixels. A value of 0 (the default) centers the filter over the pixel, with positive values shifting the filter to the right, and negative ones to the left. By passing a sequence of origins with length equal to the number of dimensions of the input array, different shifts can be specified along each axis. axes : tuple of int or None, optional If None, is filtered along all axes. Otherwise, is filtered along the specified axes. When is specified, any tuples used for or must match the length of . The ith entry in any of these tuples corresponds to the ith entry in . Returns ------- result : ndarray The result of convolution of with . See Also -------- correlate : Correlate an image with a kernel. Notes ----- Each value in result is :math:, where W is the kernel, j is the N-D spatial index over :math:, I is the and k is the coordinate of the center of W, specified by in the input parameters. Examples -------- Perhaps the simplest case to understand is `weightsinputinputinputinputweights`. >>> c = np.array([[2, 0, 1], ... [1, 0, 0], ... 
[0, 0, 0]]) >>> k = np.array([[0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0]]) >>> ndimage.convolve(c, k, mode='nearest') array([[7, 0, 3], [5, 0, 2], [3, 0, 1]])", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_filters.py", + "ast_data": "FunctionDef name:convolve arg:input arg:weights arg:output arg:mode arg:cval arg:origin arguments arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "matrix_power", + "source_code": "def matrix_power(A, power):\n M, N = A.shape\n if M != N:\n raise TypeError('sparse matrix is not square')\n if isintlike(power):\n power = int(power)\n if power < 0:\n raise ValueError('exponent must be >= 0')\n if power == 0:\n return eye_array(M, dtype=A.dtype)\n if power == 1:\n return A.copy()\n tmp = matrix_power(A, power // 2)\n if power % 2:\n return A @ tmp @ tmp\n else:\n return tmp @ tmp\n else:\n raise ValueError('exponent must be an integer')", + "docstring": "Raise a square matrix to the integer power, . For non-negative integers, `powerApower`, this may be less efficient than computing the product directly, using A @ A @ ... @ A. This is contingent upon the number of nonzero entries in the matrix. .. versionadded:: 1.12.0 Examples -------- >>> from scipy import sparse >>> A = sparse.csc_array([[0,1,0],[1,0,1],[0,1,0]]) >>> A.todense() array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) >>> (A @ A).todense() array([[1, 0, 1], [0, 2, 0], [1, 0, 1]]) >>> A2 = sparse.linalg.matrix_power(A, 2) >>> A2.todense() array([[1, 0, 1], [0, 2, 0], [1, 0, 1]]) >>> A4 = sparse.linalg.matrix_power(A, 4) >>> A4.todense() array([[2, 0, 2], [0, 4, 0], [2, 0, 2]])", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py", + "ast_data": "FunctionDef name:matrix_power arg:A arg:power arguments arg arg Assign If Compare Raise Call If Call Assign Call If Compare Raise Call If Compare Return return:yes Call If Compare Return return:yes Call Assign Call If Return return:yes Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "_get_input_shape_for_tensor", + "source_code": "def _get_input_shape_for_tensor(self, tensor, feature, per_replica, path) -> TensorShape:\n shape = tensor.shape.as_list()\n if len(shape) < 1:\n raise ValueError('Only rank 1 and above dense tensor is supported, find rank {} sparse tensor for input {}'.format(len(shape), path))\n if len(shape) > 1 and shape[-1] != 1:\n raise ValueError('Rank 2 or above dense tensor should have last dimension as 1 as the last dimension will always be reduced. 
Instead got dense tensor as shape {}'.format(shape))\n if self._num_cores_per_replica and per_replica:\n shape[0] = shape[0] // self._num_cores_per_replica\n return TensorShape(shape)", + "docstring": "Get the input shape for the dense tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py", + "ast_data": "FunctionDef name:_get_input_shape_for_tensor arg:self arg:tensor arg:feature arg:per_replica arg:path arguments arg arg arg arg arg Assign Call If Compare Call Raise Call Call Call If BoolOp Compare Call Compare Raise Call Call If BoolOp Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "minimum", + "source_code": "def minimum(self, other):\n return self._maximum_minimum(other, np.minimum)", + "docstring": "Element-wise minimum between this and another array/matrix.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_base.py", + "ast_data": "FunctionDef name:minimum arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "ViewcodeAnchorTransform", + "source_code": "class ViewcodeAnchorTransform(SphinxPostTransform):\n default_priority = 100\n\n def run(self, **kwargs: Any) -> None:\n if is_supported_builder(self.app.builder):\n self.convert_viewcode_anchors()\n else:\n self.remove_viewcode_anchors()\n\n def convert_viewcode_anchors(self) -> None:\n for node in self.document.findall(viewcode_anchor):\n anchor = nodes.inline('', _('[source]'), classes=['viewcode-link'])\n refnode = make_refnode(self.app.builder, node['refdoc'], node['reftarget'], node['refid'], anchor)\n node.replace_self(refnode)\n\n def remove_viewcode_anchors(self) -> None:\n for node in list(self.document.findall(viewcode_anchor)):\n node.parent.remove(node)", + "docstring": "Convert or remove viewcode_anchor nodes depends on builder.", + "type": "class", + "file_path": "sphinx\\sphinx\\ext\\viewcode.py", + "ast_data": "ClassDef name:ViewcodeAnchorTransform Assign FunctionDef name:run arg:self arguments arg arg If Call Call Call FunctionDef name:convert_viewcode_anchors arg:self arguments arg For Call Assign Call Call Assign Call Call FunctionDef name:remove_viewcode_anchors arg:self arguments arg For Call Call Call" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, image_encoder: ImageEncoderViT | TinyViT, prompt_encoder: PromptEncoder, mask_decoder: MaskDecoder) -> None:\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder", + "docstring": "SAM predicts object masks from an image and input prompts. Args: image_encoder: The backbone used to encode the image into image embeddings that allow for efficient mask prediction. prompt_encoder: Encodes various types of input prompts. 
mask_decoder: Predicts masks from the image embeddings and encoded prompts.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\sam\\model.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:image_encoder arg:prompt_encoder arg:mask_decoder arguments arg arg arg arg Call Call Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "__call__", + "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n if eval_gradient:\n K1, K1_gradient = self.k1(X, Y, eval_gradient=True)\n K2, K2_gradient = self.k2(X, Y, eval_gradient=True)\n return (K1 + K2, np.dstack((K1_gradient, K2_gradient)))\n else:\n return self.k1(X, Y) + self.k2(X, Y)", + "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_X, n_features) or list of object, default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg If Assign Call Assign Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "caching_allocator_alloc", + "source_code": "def caching_allocator_alloc(size, device: 'Device'=None, stream=None):\n if device is None:\n device = torch.cuda.current_device()\n device = _get_device_index(device)\n if stream is None:\n stream = torch.cuda.current_stream(device)\n if isinstance(stream, torch.cuda.streams.Stream):\n stream = stream.cuda_stream\n if not isinstance(stream, int):\n raise TypeError('Invalid type for stream argument, must be `torch.cuda.Stream` or `int` representing a pointer to a existing stream')\n with torch.cuda.device(device):\n return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)", + "docstring": "Perform a memory allocation using the CUDA memory allocator. Memory is allocated for a given device and a stream, this function is intended to be used for interoperability with other frameworks. Allocated memory is released through :func:. Args: size (int): number of bytes to be allocated. device (torch.device or int, optional): selected device. 
If it is `cuda-memory-management` for more details about GPU memory management.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\memory.py", + "ast_data": "FunctionDef name:caching_allocator_alloc arg:size arg:device arg:stream arguments arg arg arg If Compare Assign Call Assign Call If Compare Assign Call If Call Assign If Call Raise Call With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "PostProcessing", + "source_code": "def PostProcessing(self):\n for _, grad_state in self._map.items():\n for _, b_merge in grad_state.switch_map.items():\n if b_merge.op.inputs[0] == b_merge.op.inputs[1]:\n dtype = b_merge.op.inputs[0].dtype\n shape = b_merge.op.inputs[0].get_shape()\n if shape.is_fully_defined():\n grad_state.grad_context.Enter()\n grad_val = constant_op.constant(0, dtype=dtype, shape=shape)\n next_grad_val = control_flow_ops._NextIteration(grad_val)\n grad_state.grad_context.Exit()\n else:\n outer_grad_ctxt = grad_state.grad_context.outer_context\n if outer_grad_ctxt:\n outer_grad_ctxt.Enter()\n enter_grad_op = b_merge.op.inputs[0].op\n enter_grad = enter_grad_op.inputs[0]\n grad_shape = array_ops.shape_internal(enter_grad, optimize=False)\n grad_val = array_ops.zeros(grad_shape)\n if outer_grad_ctxt:\n outer_grad_ctxt.Exit()\n grad_state.grad_context.Enter()\n next_grad_val = control_flow_ops._NextIteration(grad_val)\n grad_state.grad_context.Exit()\n b_merge.op._update_input(1, next_grad_val)", + "docstring": "Perform postprocessing at the end of gradients(). We have created the gradient graph at this point. So this function can be used to perform any postprocessing on the gradient graph. We currently perform the following postprocessing: 1. Patch the gradient graph if the output of a loop variable doesn't depend on its input.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", + "ast_data": "FunctionDef name:PostProcessing arg:self arguments arg For Call For Call If Compare Assign Assign Call If Call Call Assign Call Assign Call Call Assign If Call Assign Assign Assign Call Assign Call If Call Call Assign Call Call Call" + }, + { + "library": "cherrypy", + "name": "header_elements", + "source_code": "def header_elements(fieldname, fieldvalue):\n if not fieldvalue:\n return []\n result = []\n for element in RE_HEADER_SPLIT.split(fieldvalue):\n if fieldname.startswith('Accept') or fieldname == 'TE':\n hv = AcceptElement.from_str(element)\n else:\n hv = HeaderElement.from_str(element)\n result.append(hv)\n return list(reversed(sorted(result)))", + "docstring": "Return a sorted :class: list. Constucted from a comma-separated header string.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:header_elements arg:fieldname arg:fieldvalue arguments arg arg If Return return:no Assign For Call If BoolOp Call Compare Assign Call Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "power", + "source_code": "def power(self, n, dtype=None):\n if not isscalarlike(n):\n raise NotImplementedError('input is not scalar')\n if not n:\n raise NotImplementedError('zero power is not supported as it would densify the matrix.\\nUse `np.ones(A.shape, dtype=A.dtype)` for this case.')\n data = self._deduped_data()\n if dtype is not None:\n data = data.astype(dtype, copy=False)\n return self._with_data(data ** n)", + "docstring": "This function performs element-wise power. 
Parameters ---------- n : scalar n is a non-zero scalar (nonzero avoids dense ones creation) If zero power is desired, special case it to use dtype : If dtype is not specified, the current dtype will be preserved. Raises ------ NotImplementedError : if n is a zero scalar If zero power is desired, special case it to use ``", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_data.py", + "ast_data": "FunctionDef name:power arg:self arg:n arg:dtype arguments arg arg arg If Call Raise Call If Raise Call Assign Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "Shekel05", + "source_code": "class Shekel05(Benchmark):\n\n def __init__(self, dimensions=4):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))\n self.global_optimum = [[4.00003715092, 4.00013327435, 4.00003714871, 4.0001332742]]\n self.fglob = -10.1531996791\n self.A = asarray([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, 1.0], [8.0, 8.0, 8.0, 8.0], [6.0, 6.0, 6.0, 6.0], [3.0, 7.0, 3.0, 7.0]])\n self.C = asarray([0.1, 0.2, 0.2, 0.4, 0.4])\n\n def fun(self, x, *args):\n self.nfev += 1\n return -sum(1 / (sum((x - self.A) ** 2, axis=1) + self.C))", + "docstring": "Shekel 5 objective function. This class defines the Shekel 5 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Shekel05}}(x) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2 }nx_i \\in [0, 10]i = 1, ..., 4f(x) = -10.15319585x_i = 4i = 1, ..., 4` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: this is a different global minimum compared to Jamil#130. The minimum is found by doing lots of optimisations. 
The solution is supposed to be at [4] * N, is there any numerical overflow?", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:Shekel05 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "parameters", + "source_code": "@property\ndef parameters(self) -> Mapping[str, Any]:\n return super().parameters", + "docstring": "Returns an ordered mapping of parameter name to specification.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", + "ast_data": "FunctionDef name:parameters arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_with_num_row_partitions", + "source_code": "def _with_num_row_partitions(self, new_num_row_partitions: int) -> 'DynamicRaggedShape.Spec':\n rank = self.rank\n if rank is None:\n raise ValueError('Changing num_row_partitions with unknown rank unsupported')\n if new_num_row_partitions > max(rank - 1, 0):\n raise ValueError('Number of row partitions too large')\n if new_num_row_partitions < 0:\n raise ValueError('Number of row partitions negative')\n if self.num_row_partitions == new_num_row_partitions:\n return self\n elif self.num_row_partitions < new_num_row_partitions:\n rp_delta = new_num_row_partitions - self.num_row_partitions\n tail_shape = DynamicRaggedShape.Spec._from_tensor_shape(self._static_inner_shape, rp_delta, self.dtype)\n return DynamicRaggedShape.Spec(row_partitions=self._row_partitions + tail_shape._row_partitions, static_inner_shape=tail_shape._static_inner_shape, dtype=self.dtype)\n else:\n assert self.num_row_partitions > new_num_row_partitions\n new_row_partitions = self._row_partitions[:new_num_row_partitions]\n last_row_partition = new_row_partitions[-1]\n old_row_partitions = self._row_partitions[new_num_row_partitions:]\n new_static_inner_shape = tensor_shape.TensorShape([last_row_partition.nvals] + [x.uniform_row_length for x in old_row_partitions]) + self._static_inner_shape[1:]\n return DynamicRaggedShape.Spec(new_row_partitions, new_static_inner_shape, self.dtype)", + "docstring": "Change the number of row partitions in the spec.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:_with_num_row_partitions arg:self arg:new_num_row_partitions arguments arg arg Assign If Compare Raise Call If Compare Call Raise Call If Compare Raise Call If Compare Return return:yes If Compare Assign Assign Call Return return:yes Call Compare Assign Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "as_shape", + "source_code": "def as_shape(shape) -> 'TensorShape':\n if isinstance(shape, TensorShape):\n return shape\n else:\n return TensorShape(shape)", + "docstring": "Converts the given object to a TensorShape.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:as_shape arg:shape arguments arg If Call Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "__iter__", + "source_code": "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", + "docstring": "Iterate over coordinates of this Point.", + "type": "method", + 
"file_path": "django\\django\\contrib\\gis\\geos\\point.py", + "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call Call" + }, + { + "library": "tensorflow", + "name": "shape", + "source_code": "@property\ndef shape(self):\n raise NotImplementedError", + "docstring": "The of this variable. Returns: A .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Raise" + }, + { + "library": "scipy", + "name": "find", + "source_code": "def find(A):\n A = coo_array(A, copy=True)\n A.sum_duplicates()\n nz_mask = A.data != 0\n return (A.row[nz_mask], A.col[nz_mask], A.data[nz_mask])", + "docstring": "Return the indices and values of the nonzero elements of a matrix Parameters ---------- A : dense or sparse array or matrix Matrix whose nonzero elements are desired. Returns ------- (I,J,V) : tuple of arrays I,J, and V contain the row indices, column indices, and values of the nonzero entries. Examples -------- >>> from scipy.sparse import csr_array, find >>> A = csr_array([[7.0, 8.0, 0],[0, 0, 9.0]]) >>> find(A) (array([0, 0, 1], dtype=int32), array([0, 1, 2], dtype=int32), array([ 7., 8., 9.]))", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_extract.py", + "ast_data": "FunctionDef name:find arg:A arguments arg Assign Call Call Assign Compare Return return:yes" + }, + { + "library": "kornia", + "name": "right_jacobian", + "source_code": "@staticmethod\ndef right_jacobian(vec: Tensor) -> Tensor:\n R_skew = vector_to_skew_symmetric_matrix(vec)\n theta = vec.norm(dim=-1, keepdim=True)[..., None]\n I = eye(3, device=vec.device, dtype=vec.dtype)\n Jr = I - (1 - theta.cos()) / theta ** 2 * R_skew + (theta - theta.sin()) / theta ** 3 * (R_skew @ R_skew)\n return Jr", + "docstring": "Compute the right Jacobian of So3. Args: vec: the input point of shape :math:. Example: >>> vec = torch.tensor([1., 2., 3.]) >>> So3.right_jacobian(vec) tensor([[-0.0687, 0.5556, -0.0141], [-0.2267, 0.1779, 0.6236], [ 0.5074, 0.3629, 0.5890]])", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", + "ast_data": "FunctionDef name:right_jacobian arg:vec arguments arg Assign Call Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "PyperclipException", + "source_code": "class PyperclipException(RuntimeError):\n pass", + "docstring": "Exception raised when clipboard functionality is unsupported. Raised by ``.", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:PyperclipException" + }, + { + "library": "pandas", + "name": "render_pep440_old", + "source_code": "def render_pep440_old(pieces):\n if pieces['closest-tag']:\n rendered = pieces['closest-tag']\n if pieces['distance'] or pieces['dirty']:\n rendered += f'0.post{pieces['distance']}'\n if pieces['dirty']:\n rendered += '.dev0'\n else:\n rendered = f'0.post{pieces['distance']}'\n if pieces['dirty']:\n rendered += '.dev0'\n return rendered", + "docstring": "TAG[.postDISTANCE[.dev0]] . The \".dev0\" means dirty. Exceptions: 1: no tags. 
0.postDISTANCE[.dev0]", + "type": "function", + "file_path": "pandas\\pandas\\_version.py", + "ast_data": "FunctionDef name:render_pep440_old arg:pieces arguments arg If Assign If BoolOp If Assign If Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_params", + "source_code": "def get_params(self, deep=True):\n return super()._get_params('estimators', deep=deep)", + "docstring": "Get the parameters of an estimator from the ensemble. Returns the parameters given in the constructor as well as the estimators contained within the parameter. Parameters ---------- deep : bool, default=True Setting it to True gets the various estimators and the parameters of the estimators as well. Returns ------- params : dict Parameter and estimator names mapped to their values or parameter names mapped to their values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py", + "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "rotate_around", + "source_code": "def rotate_around(self, x, y, theta):\n return self.translate(-x, -y).rotate(theta).translate(x, y)", + "docstring": "Add a rotation (in radians) around the point (x, y) in place. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:rotate_around arg:self arg:x arg:y arg:theta arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "ObserverBase", + "source_code": "class ObserverBase(ABC, nn.Module):\n\n def __init__(self, dtype, is_dynamic: bool=False):\n super().__init__()\n self.dtype = dtype\n self.is_dynamic = is_dynamic\n\n @abstractmethod\n def forward(self, x):\n pass\n\n @abstractmethod\n def calculate_qparams(self, **kwargs):\n pass\n with_args = classmethod(_with_args)\n with_callable_args = classmethod(_with_callable_args)", + "docstring": "Base observer Module. Any observer implementation should derive from this class. Concrete observers should follow the same API. In forward, they will update the statistics of the observed Tensor. And they should provide a function that computes the quantization parameters given the collected statistics. Args: dtype: dtype argument to the node needed to implement the reference model spec. 
is_dynamic: indicator for whether the observer is a placeholder for dynamic quantization or static quantization", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", + "ast_data": "ClassDef name:ObserverBase FunctionDef name:__init__ arg:self arg:dtype arg:is_dynamic arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:x arguments arg arg FunctionDef name:calculate_qparams arg:self arguments arg arg Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "ScatterDimensionNumbers", + "source_code": "class ScatterDimensionNumbers:\n __slots__ = ('update_window_dims', 'inserted_window_dims', 'scatter_dims_to_operand_dims', 'index_vector_dim')\n\n def __init__(self):\n self.update_window_dims = []\n self.inserted_window_dims = []\n self.scatter_dims_to_operand_dims = []\n self.index_vector_dim = 0", + "docstring": "Python representation of a xla.ScatterDimensionNumbers protobuf.", + "type": "class", + "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py", + "ast_data": "ClassDef name:ScatterDimensionNumbers Assign FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "evaluate_expr", + "source_code": "def evaluate_expr(self, orig_expr: sympy.Basic, hint: Optional[Union[int, bool, float]]=None, fx_node: Optional[torch.fx.Node]=None, size_oblivious: bool=False, fallback_value: Optional[bool]=None, *, forcing_spec: bool=False) -> sympy.Basic:\n suppress_guards_tls = ShapeEnv._suppress_guards_tls()\n return self._inner_evaluate_expr(orig_expr, hint, fx_node, size_oblivious, forcing_spec, suppress_guards_tls, fallback_value)", + "docstring": "Given an expression, evaluates it, adding guards if necessary When fallback_value is not None the function return fallback_value instead of failing with data dependent error.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:evaluate_expr arg:self arg:orig_expr arg:hint arg:fx_node arg:size_oblivious arg:fallback_value arguments arg arg arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "force_static", + "source_code": "def force_static(self):\n if isinstance(self.__variable, SymNodeVariable):\n self.__variable.evaluate_expr()\n elif isinstance(self.__variable, ConstantVariable):\n pass\n else:\n raise AssertionError(f'cannot force {self.__variable} ({type(self.__variable)}) static')", + "docstring": "Forces that a value is static, inducing a guard on its specific value", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\comptime.py", + "ast_data": "FunctionDef name:force_static arg:self arguments arg If Call Call If Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "reset_memory_stats", + "source_code": "def reset_memory_stats(self, dev):\n self._initialize_physical_devices()\n self.ensure_initialized()\n pywrap_tfe.TFE_ResetMemoryStats(self._context_handle, dev)", + "docstring": "Resets the tracked memory stats for the device.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:reset_memory_stats arg:self arg:dev arguments arg arg Call Call Call" + }, + { + "library": "pytorch", + "name": "_validate_structured_pruning", + "source_code": "def _validate_structured_pruning(t):\n shape = t.shape\n if len(shape) <= 1:\n raise ValueError(f'Structured pruning can only be applied to 
multidimensional tensors. Found tensor of shape {shape} with {len(shape)} dims')", + "docstring": "Validate that the tensor to be pruned is at least 2-Dimensional. Validation helper to check that the tensor to be pruned is multi- dimensional, such that the concept of \"channels\" is well-defined. Args: t (torch.Tensor): tensor representing the parameter to prune Raises: ValueError: if the tensor is not at least 2D.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:_validate_structured_pruning arg:t arguments arg Assign If Compare Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n return f'Shard(dim={self.dim})'", + "docstring": "machine readable representation of the Shard placement", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_all_points_on_plane", + "source_code": "def _all_points_on_plane(xs, ys, zs, atol=1e-08):\n xs, ys, zs = (np.asarray(xs), np.asarray(ys), np.asarray(zs))\n points = np.column_stack([xs, ys, zs])\n points = points[~np.isnan(points).any(axis=1)]\n points = np.unique(points, axis=0)\n if len(points) <= 3:\n return True\n vs = (points - points[0])[1:]\n vs = vs / np.linalg.norm(vs, axis=1)[:, np.newaxis]\n vs = np.unique(vs, axis=0)\n if len(vs) <= 2:\n return True\n cross_norms = np.linalg.norm(np.cross(vs[0], vs[1:]), axis=1)\n zero_cross_norms = np.where(np.isclose(cross_norms, 0, atol=atol))[0] + 1\n vs = np.delete(vs, zero_cross_norms, axis=0)\n if len(vs) <= 2:\n return True\n n = np.cross(vs[0], vs[1])\n n = n / np.linalg.norm(n)\n dots = np.dot(n, vs.transpose())\n return np.allclose(dots, 0, atol=atol)", + "docstring": "Check if all points are on the same plane. Note that NaN values are ignored. Parameters ---------- xs, ys, zs : array-like The x, y, and z coordinates of the points. 
atol : float, default: 1e-8 The tolerance for the equality check.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:_all_points_on_plane arg:xs arg:ys arg:zs arg:atol arguments arg arg arg arg Assign Call Call Call Assign Call Assign Call Call Assign Call If Compare Call Return return:yes Assign Assign Call Assign Call If Compare Call Return return:yes Assign Call Call Assign Call Call Assign Call If Compare Call Return return:yes Assign Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "prevent_feeding", + "source_code": "def prevent_feeding(self, tensor) -> None:\n self._unfeedable_tensors.add(tensor)", + "docstring": "Marks the given as unfeedable in this graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:prevent_feeding arg:self arg:tensor arguments arg arg Call" + }, + { + "library": "pygame", + "name": "_addfont", + "source_code": "def _addfont(name, bold, italic, font, fontdict):\n if name not in fontdict:\n fontdict[name] = {}\n fontdict[name][bold, italic] = font", + "docstring": "insert a font and style into the font dictionary", + "type": "function", + "file_path": "pygame\\src_py\\sysfont.py", + "ast_data": "FunctionDef name:_addfont arg:name arg:bold arg:italic arg:font arg:fontdict arguments arg arg arg arg arg If Compare Assign Assign" + }, + { + "library": "pytorch", + "name": "library_paths", + "source_code": "def library_paths(device_type: str='cpu') -> list[str]:\n paths = [TORCH_LIB_PATH]\n if device_type == 'cuda' and IS_HIP_EXTENSION:\n lib_dir = 'lib'\n paths.append(_join_rocm_home(lib_dir))\n if HIP_HOME is not None:\n paths.append(os.path.join(HIP_HOME, 'lib'))\n elif device_type == 'cuda':\n if IS_WINDOWS:\n lib_dir = os.path.join('lib', 'x64')\n else:\n lib_dir = 'lib64'\n if not os.path.exists(_join_cuda_home(lib_dir)) and os.path.exists(_join_cuda_home('lib')):\n lib_dir = 'lib'\n paths.append(_join_cuda_home(lib_dir))\n if CUDNN_HOME is not None:\n paths.append(os.path.join(CUDNN_HOME, lib_dir))\n elif device_type == 'xpu':\n if IS_WINDOWS:\n lib_dir = os.path.join('lib', 'x64')\n else:\n lib_dir = 'lib64'\n if not os.path.exists(_join_sycl_home(lib_dir)) and os.path.exists(_join_sycl_home('lib')):\n lib_dir = 'lib'\n paths.append(_join_sycl_home(lib_dir))\n return paths", + "docstring": "Get the library paths required to build a C++ or CUDA extension. Args: device_type: Defaults to \"cpu\". Returns: A list of library path strings.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\cpp_extension.py", + "ast_data": "FunctionDef name:library_paths arg:device_type arguments arg Assign If BoolOp Compare Assign Call Call If Compare Call Call If Compare If Assign Call Assign If BoolOp Call Call Call Call Assign Call Call If Compare Call Call If Compare If Assign Call Assign If BoolOp Call Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_size", + "source_code": "@tf_export('sets.size', v1=['sets.size', 'sets.set_size'])\n@dispatch.add_dispatch_support\ndef set_size(a, validate_indices=True):\n a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name='a')\n if not isinstance(a, sparse_tensor.SparseTensor):\n raise TypeError('Expected `SparseTensor`, got %s.' 
% a)\n if a.values.dtype.base_dtype not in _VALID_DTYPES:\n raise TypeError(f'Invalid dtype `{a.values.dtype}` not in supported dtypes: `{_VALID_DTYPES}`.')\n return gen_set_ops.set_size(a.indices, a.values, a.dense_shape, validate_indices)", + "docstring": "Compute number of unique elements along last dimension of . Args: a: , with indices sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in . Note that setting this to allows for undefined behavior when calling this function with invalid indices. Returns: of set sizes. For ranked , this is a with rank , and the same 1st dimensions as . Each value is the number of unique elements in the corresponding dimension of . Raises: TypeError: If is an invalid types.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sets_impl.py", + "ast_data": "FunctionDef name:set_size arg:a arg:validate_indices arguments arg arg Assign Call If Call Raise Call If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "__next__", + "source_code": "def __next__(self):\n chunk = self.input.read(self.chunkSize)\n if chunk:\n return chunk\n else:\n if hasattr(self.input, 'close'):\n self.input.close()\n raise StopIteration()", + "docstring": "Return next chunk of file.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\__init__.py", + "ast_data": "FunctionDef name:__next__ arg:self arguments arg Assign Call If Return return:yes If Call Call Raise Call" + }, + { + "library": "scrapy", + "name": "MarshalItemExporter", + "source_code": "class MarshalItemExporter(BaseItemExporter):\n\n def __init__(self, file: BytesIO, **kwargs: Any):\n super().__init__(**kwargs)\n self.file: BytesIO = file\n\n def export_item(self, item: Any) -> None:\n marshal.dump(dict(self._get_serialized_fields(item)), self.file)", + "docstring": "Exports items in a Python-specific binary format (see :mod:). :param file: The file-like object to use for exporting the data. 
Its `bytes~io.BytesIO` object, etc)", + "type": "class", + "file_path": "scrapy\\scrapy\\exporters.py", + "ast_data": "ClassDef name:MarshalItemExporter FunctionDef name:__init__ arg:self arg:file arguments arg arg arg Call Call FunctionDef name:export_item arg:self arg:item arguments arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "_BoolCodec", + "source_code": "class _BoolCodec:\n\n def can_encode(self, pyobj):\n return isinstance(pyobj, bool)\n\n def do_encode(self, bool_value, encode_fn):\n del encode_fn\n value = struct_pb2.StructuredValue()\n value.bool_value = bool_value\n return value\n\n def can_decode(self, value):\n return value.HasField('bool_value')\n\n def do_decode(self, value, decode_fn):\n del decode_fn\n return value.bool_value", + "docstring": "Codec for booleans.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py", + "ast_data": "ClassDef name:_BoolCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:bool_value arg:encode_fn arguments arg arg arg Assign Call Assign Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "save_source_file", + "source_code": "def save_source_file(self, module_name: str, file_or_directory: str, dependencies=True):\n path = Path(file_or_directory)\n if path.is_dir():\n to_save = []\n module_path = module_name.replace('.', '/')\n for filename in path.glob('**/*.py'):\n relative_path = filename.relative_to(path).as_posix()\n archivename = module_path + '/' + relative_path\n submodule_name = None\n if filename.name == '__init__.py':\n submodule_name = archivename[:-len('/__init__.py')].replace('/', '.')\n is_package = True\n else:\n submodule_name = archivename[:-len('.py')].replace('/', '.')\n is_package = False\n to_save.append((submodule_name, _read_file(str(filename)), is_package, dependencies))\n for item in to_save:\n self.save_source_string(*item)\n else:\n is_package = path.name == '__init__.py'\n self.save_source_string(module_name, _read_file(file_or_directory), is_package, dependencies)", + "docstring": "Adds the local file system `save_source_file`, we scan the source for dependencies.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_exporter.py", + "ast_data": "FunctionDef name:save_source_file arg:self arg:module_name arg:file_or_directory arg:dependencies arguments arg arg arg arg Assign Call If Call Assign Assign Call For Call Assign Call Call Assign Assign If Compare Assign Call Call Assign Assign Call Call Assign Call Call Call For Call Assign Compare Call Call" + }, + { + "library": "pytorch", + "name": "find_partition", + "source_code": "def find_partition(self, id: str) -> GraphInfo | None:\n if id == self.id:\n return self\n current_length = len(self.id)\n if len(id) > current_length:\n if id[current_length] == '0' and self.upper_graph_info is not None:\n return self.upper_graph_info.find_partition(id)\n elif id[current_length] == '1' and self.lower_graph_info is not None:\n return self.lower_graph_info.find_partition(id)\n return None", + "docstring": "Find the object with the given id.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:find_partition arg:self arg:id arguments arg arg If 
Compare Return return:yes Assign Call If Compare Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "get_element_from_tensor_info", + "source_code": "def get_element_from_tensor_info(tensor_info, graph=None, import_scope=None):\n graph = graph or ops.get_default_graph()\n return graph.as_graph_element(ops.prepend_name_scope(tensor_info.name, import_scope=import_scope))", + "docstring": "Returns the element in the graph described by a TensorInfo proto. Args: tensor_info: A TensorInfo proto describing an Op or Tensor by name. graph: The tf.Graph in which tensors are looked up. If None, the current default graph is used. import_scope: If not None, names in are prefixed with this string before lookup. Returns: Op or tensor in described by . Raises: KeyError: If does not correspond to an op or tensor in", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\utils_impl.py", + "ast_data": "FunctionDef name:get_element_from_tensor_info arg:tensor_info arg:graph arg:import_scope arguments arg arg arg Assign BoolOp Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "ndenumerate", + "source_code": "@set_module('numpy')\nclass ndenumerate:\n\n def __init__(self, arr):\n self.iter = np.asarray(arr).flat\n\n def __next__(self):\n return (self.iter.coords, next(self.iter))\n\n def __iter__(self):\n return self", + "docstring": "Multidimensional index iterator. Return an iterator yielding pairs of array coordinates and values. Parameters ---------- arr : ndarray Input array. See Also -------- ndindex, flatiter Examples -------- >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): ... 
print(index, x) (0, 0) 1 (0, 1) 2 (1, 0) 3 (1, 1) 4", + "type": "class", + "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py", + "ast_data": "ClassDef name:ndenumerate FunctionDef name:__init__ arg:self arg:arr arguments arg arg Assign Call FunctionDef name:__next__ arg:self arguments arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "has_tensor", + "source_code": "def has_tensor(obj: object) -> bool:\n obj_id = id(obj)\n if obj_id in seen_ids:\n return seen_ids[obj_id]\n seen_ids[obj_id] = False\n if isinstance(obj, (torch.Tensor, torch.nn.Module)) or (istype(obj, type) and issubclass(obj, torch.nn.Module)):\n seen_ids[obj_id] = True\n return seen_ids[obj_id]\n elif config.trace_numpy and np and (istype(obj, np.ndarray) or isinstance(obj, np.generic)):\n seen_ids[obj_id] = True\n return seen_ids[obj_id]\n elif istype(obj, (list, tuple)):\n seen_ids[obj_id] = any((has_tensor(v) for v in obj))\n return seen_ids[obj_id]\n elif istype(obj, dict):\n values = list(obj.values())\n seen_ids[obj_id] = any((has_tensor(v) for v in values))\n return seen_ids[obj_id]\n elif istype(obj, (str, int, float, type(None), bool)):\n seen_ids[obj_id] = False\n return seen_ids[obj_id]\n elif is_namedtuple(obj) and hasattr(obj, '_fields'):\n seen_ids[obj_id] = any((has_tensor(getattr(obj, v)) for v in obj._fields))\n return seen_ids[obj_id]\n else:\n return False", + "docstring": "Recursively check if the obj has a tensor", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\convert_frame.py", + "ast_data": "FunctionDef name:has_tensor arg:obj arguments arg Assign Call If Compare Return return:yes Assign If BoolOp Call BoolOp Call Call Assign Return return:yes If BoolOp BoolOp Call Call Assign Return return:yes If Call Assign Call Call Return return:yes If Call Assign Call Call Assign Call Call Return return:yes If Call Call Assign Return return:yes If BoolOp Call Call Assign Call Call Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "get_chunks", + "source_code": "def get_chunks(self, n_chunks: int | None=None) -> Iterable[PandasDataFrameXchg]:\n if n_chunks and n_chunks > 1:\n size = len(self._df)\n step = size // n_chunks\n if size % n_chunks != 0:\n step += 1\n for start in range(0, step * n_chunks, step):\n yield PandasDataFrameXchg(self._df.iloc[start:start + step, :], allow_copy=self._allow_copy)\n else:\n yield self", + "docstring": "Return an iterator yielding the chunks.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\dataframe.py", + "ast_data": "FunctionDef name:get_chunks arg:self arg:n_chunks arguments arg arg If BoolOp Compare Assign Call Assign If Compare For Call Call" + }, + { + "library": "matplotlib", + "name": "is_saving", + "source_code": "def is_saving(self):\n return self._is_saving", + "docstring": "Return whether the renderer is in the process of saving to a file, rather than rendering for an on-screen buffer.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:is_saving arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "log_every_n", + "source_code": "@tf_export(v1=['logging.log_every_n'])\ndef log_every_n(level, msg, n, *args):\n count = _GetNextLogCountPerToken(_GetFileAndLine())\n log_if(level, msg, not count % n, *args)", + "docstring": "Log 'msg % args' at level 'level' once per 'n' times. 
Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py", + "ast_data": "FunctionDef name:log_every_n arg:level arg:msg arg:n arguments arg arg arg arg Assign Call Call Call Call" + }, + { + "library": "scipy", + "name": "_standard_rvs", + "source_code": "def _standard_rvs(self, n, shape, dim, df, random_state):\n n_tril = dim * (dim - 1) // 2\n covariances = random_state.normal(size=n * n_tril).reshape(shape + (n_tril,))\n variances = np.r_[[random_state.chisquare(df - (i + 1) + 1, size=n) ** 0.5 for i in range(dim)]].reshape((dim,) + shape[::-1]).T\n A = np.zeros(shape + (dim, dim))\n size_idx = tuple([slice(None, None, None)] * len(shape))\n tril_idx = np.tril_indices(dim, k=-1)\n A[size_idx + tril_idx] = covariances\n diag_idx = np.diag_indices(dim)\n A[size_idx + diag_idx] = variances\n return A", + "docstring": "Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom random_state : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used. Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_standard_rvs arg:self arg:n arg:shape arg:dim arg:df arg:random_state arguments arg arg arg arg arg arg Assign Assign Call Call Assign Call Call Call Assign Call Assign Call Call Call Assign Call Assign Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "DeterminePeakMemoryUsage", + "source_code": "def DeterminePeakMemoryUsage(self, item):\n return tf_cluster.TF_DeterminePeakMemoryUsage(item.tf_item, self._tf_cluster)", + "docstring": "Returns a snapshot of the peak memory usage. Args: item: The item for which to measure the costs. Returns: A hashtable indexed by device name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\grappler\\cluster.py", + "ast_data": "FunctionDef name:DeterminePeakMemoryUsage arg:self arg:item arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_env_variable_or_raise", + "source_code": "def get_env_variable_or_raise(env_name: str) -> str:\n value = os.environ.get(env_name, None)\n if value is None:\n msg = f'Environment variable {env_name} expected, but not set'\n raise ValueError(msg)\n return value", + "docstring": "Tries to retrieve environment variable. Raises `` if no environment variable found. Args: env_name (str): Name of the env variable", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\api.py", + "ast_data": "FunctionDef name:get_env_variable_or_raise arg:env_name arguments arg Assign Call If Compare Assign Raise Call Return return:yes" + }, + { + "library": "numpy", + "name": "chebgrid3d", + "source_code": "def chebgrid3d(x, y, z, c):\n return pu._gridnd(chebval, c, x, y, z)", + "docstring": "Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. This function returns the values: .. 
math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) where the points `axbyczxyzxyzxyzccxyzxyzcxy`. See Also -------- chebval, chebval2d, chebgrid2d, chebval3d", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", + "ast_data": "FunctionDef name:chebgrid3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "pg_group_ranks", + "source_code": "@property\ndef pg_group_ranks(self) -> dict[ProcessGroup, dict[int, int]]:\n global _pg_group_ranks\n return _pg_group_ranks", + "docstring": "Process group's global rank to local rank mapping. TODO don't expose the map, expose fine grained ops", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:pg_group_ranks arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "summary", + "source_code": "def summary(self, line_length=None, positions=None, print_fn=None):\n if not self.built:\n raise ValueError('This model has not yet been built. Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.')\n layer_utils.print_summary(self, line_length=line_length, positions=positions, print_fn=print_fn)", + "docstring": "Prints a string summary of the network. Args: line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to . print_fn: Print function to use. Defaults to . It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. Raises: ValueError: if is called before the model is built.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:summary arg:self arg:line_length arg:positions arg:print_fn arguments arg arg arg arg If Raise Call Call" + }, + { + "library": "tensorflow", + "name": "_as_row_partitions", + "source_code": "def _as_row_partitions(self):\n rank = self.rank\n if rank is None:\n raise ValueError('rank must be known for _as_row_partitions')\n elif rank < 1:\n raise ValueError('rank must be >= 1 for _as_row_partitions')\n fully_ragged = self._with_num_row_partitions(rank - 1)\n return fully_ragged.row_partitions", + "docstring": "Returns row partitions representing this shape. In order to represent a shape as row partitions, the rank of the shape must be known, and the shape must have rank at least one. Returns: A list of RowPartition objects. 
Raises: ValueError, if the shape cannot be represented by RowPartitions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:_as_row_partitions arg:self arguments arg Assign If Compare Raise Call If Compare Raise Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "trstlp", + "source_code": "def trstlp(A, b, delta, g):\n num_constraints = A.shape[1]\n num_vars = A.shape[0]\n if DEBUGGING:\n assert num_vars >= 1\n assert num_constraints >= 0\n assert np.size(g) == num_vars\n assert np.size(b) == num_constraints\n assert delta > 0\n vmultc = np.zeros(num_constraints + 1)\n iact = np.zeros(num_constraints + 1, dtype=int)\n nact = 0\n d = np.zeros(num_vars)\n z = np.zeros((num_vars, num_vars))\n A_aug = np.hstack([A, g.reshape((num_vars, 1))])\n b_aug = np.hstack([b, 0])\n for i in range(num_constraints + 1):\n if (maxval := max(abs(A_aug[:, i]))) > 1000000000000.0:\n modscal = max(2 * REALMIN, 1 / maxval)\n A_aug[:, i] *= modscal\n b_aug[i] *= modscal\n iact[:num_constraints], nact, d, vmultc[:num_constraints], z = trstlp_sub(iact[:num_constraints], nact, 1, A_aug[:, :num_constraints], b_aug[:num_constraints], delta, d, vmultc[:num_constraints], z)\n iact, nact, d, vmultc, z = trstlp_sub(iact, nact, 2, A_aug, b_aug, delta, d, vmultc, z)\n if DEBUGGING:\n assert all(np.isfinite(d))\n assert np.linalg.norm(d) <= 2 * delta\n return d", + "docstring": "This function calculated an n-component vector d by the following two stages. In the first stage, d is set to the shortest vector that minimizes the greatest violation of the constraints A.T @ D = B. In other words, the A and B in our implementation are the negative of those in Powell's implementation. 1. The algorithm was NOT documented in the COBYLA paper. A note should be written to introduce it! 2. As a major part of the algorithm (see trstlp_sub), the code maintains and updates the QR factorization of A[iact[:nact]], i.e. the gradients of all the active (linear) constraints. The matrix Z is indeed Q, and the vector zdota is the diagonal of R. The factorization is updated by Givens rotations when an index is added in or removed from iact. 3. 
There are probably better algorithms available for the trust-region linear programming problem.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\cobyla\\trustregion.py", + "ast_data": "FunctionDef name:trstlp arg:A arg:b arg:delta arg:g arguments arg arg arg arg Assign Assign If Compare Compare Compare Call Compare Call Compare Assign Call Assign Call Assign Assign Call Assign Call Assign Call Call Assign Call For Call If Compare Call Call Assign Call Assign Call Assign Call If Call Call Compare Call Return return:yes" + }, + { + "library": "kornia", + "name": "RandomGaussianNoise", + "source_code": "class RandomGaussianNoise(IntensityAugmentationBase2D):\n\n def __init__(self, mean: float=0.0, std: float=1.0, same_on_batch: bool=False, p: float=0.5, keepdim: bool=False) -> None:\n super().__init__(p=p, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim)\n self.flags = {'mean': mean, 'std': std}\n\n def generate_parameters(self, shape: Tuple[int, ...]) -> Dict[str, Tensor]:\n return {}\n\n def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n if 'gaussian_noise' in params:\n gaussian_noise = params['gaussian_noise']\n else:\n gaussian_noise = _randn_like(input, mean=flags['mean'], std=flags['std'])\n self._params['gaussian_noise'] = gaussian_noise\n return input + gaussian_noise", + "docstring": "Add gaussian noise to a batch of multi-dimensional images. .. image:: _static/img/RandomGaussianNoise.png Args: mean: The mean of the gaussian distribution. std: The standard deviation of the gaussian distribution. same_on_batch: apply the same transformation across the batch. p: probability of applying the transformation. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). Examples: >>> rng = torch.manual_seed(0) >>> img = torch.ones(1, 1, 2, 2) >>> RandomGaussianNoise(mean=0., std=1., p=1.)(img) tensor([[[[ 2.5410, 0.7066], [-1.1788, 1.5684]]]]) To apply the exact augmenation again, you may take the advantage of the previous parameter state: >>> input = torch.randn(1, 3, 32, 32) >>> aug = RandomGaussianNoise(mean=0., std=1., p=1.) >>> (aug(input) == aug(input, params=aug._params)).all() tensor(True)", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\gaussian_noise.py", + "ast_data": "ClassDef name:RandomGaussianNoise FunctionDef name:__init__ arg:self arg:mean arg:std arg:same_on_batch arg:p arg:keepdim arguments arg arg arg arg arg arg Call Call Assign FunctionDef name:generate_parameters arg:self arg:shape arguments arg arg Return return:no FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg If Compare Assign Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "Callable", + "source_code": "@tf_export('types.experimental.Callable', v1=[])\nclass Callable(metaclass=abc.ABCMeta):\n\n @property\n @abc.abstractmethod\n def function_type(self) -> FunctionType:\n pass\n\n def __call__(self, *args, **kwargs):\n pass", + "docstring": "Base class for TF callables like those created by tf.function. 
Note: Callables are conceptually very similar to : a is a kind of callable.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\core.py", + "ast_data": "ClassDef name:Callable FunctionDef name:function_type arg:self arguments arg FunctionDef name:__call__ arg:self arguments arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "TracingContext", + "source_code": "class TracingContext(metaclass=abc.ABCMeta):\n pass", + "docstring": "Contains information scoped to the tracing of multiple objects. is a container class for flags and variables that have any kind of influence on the tracing behaviour of the class implementing the __tf_tracing_type__. This context will be shared across all __tf_tracing_type__ calls while constructing the TraceType for a particular set of objects.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py", + "ast_data": "ClassDef name:TracingContext" + }, + { + "library": "pytorch", + "name": "ZipperIterDataPipe", + "source_code": "@functional_datapipe('zip')\nclass ZipperIterDataPipe(IterDataPipe[tuple[_T_co]]):\n datapipes: tuple[IterDataPipe]\n\n def __init__(self, *datapipes: IterDataPipe):\n if not all((isinstance(dp, IterDataPipe) for dp in datapipes)):\n raise TypeError('All inputs are required to be `IterDataPipe` for `ZipIterDataPipe`.')\n super().__init__()\n self.datapipes = datapipes\n\n def __iter__(self) -> Iterator[tuple[_T_co]]:\n iterators = [iter(datapipe) for datapipe in self.datapipes]\n yield from zip(*iterators)\n\n def __len__(self) -> int:\n if all((isinstance(dp, Sized) for dp in self.datapipes)):\n return min((len(dp) for dp in self.datapipes))\n else:\n raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\")", + "docstring": "Aggregates elements into a tuple from each of the input DataPipes (functional name: ``). The output is stopped as soon as the shortest input DataPipe is exhausted. Args: *datapipes: Iterable DataPipes being aggregated Example: >>> # xdoctest: +REQUIRES(module:torchdata) >>> from torchdata.datapipes.iter import IterableWrapper >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) >>> list(dp1.zip(dp2, dp3)) [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)]", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py", + "ast_data": "ClassDef name:ZipperIterDataPipe FunctionDef name:__init__ arg:self arguments arg arg If Call Call Raise Call Call Call Assign FunctionDef name:__iter__ arg:self arguments arg Assign Call Call FunctionDef name:__len__ arg:self arguments arg If Call Call Return return:yes Call Call Raise Call Call Call" + }, + { + "library": "pytorch", + "name": "remove_custom", + "source_code": "def remove_custom(self, opset: OpsetVersion) -> None:\n if not self._functions.overridden(opset):\n warnings.warn(f\"No custom function registered for '{self._name}' opset {opset}\")\n return\n self._functions.remove_override(opset)", + "docstring": "Removes a custom symbolic function. 
Args: opset: The opset version of the custom function to remove.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py", + "ast_data": "FunctionDef name:remove_custom arg:self arg:opset arguments arg arg If Call Call Return return:no Call" + }, + { + "library": "django", + "name": "get_paginate_by", + "source_code": "def get_paginate_by(self, queryset):\n return self.paginate_by", + "docstring": "Get the number of items to paginate by, or `` for no pagination.", + "type": "method", + "file_path": "django\\django\\views\\generic\\list.py", + "ast_data": "FunctionDef name:get_paginate_by arg:self arg:queryset arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_LoadStatus", + "source_code": "class _LoadStatus:\n\n @abc.abstractmethod\n def assert_consumed(self):\n pass\n\n @abc.abstractmethod\n def assert_existing_objects_matched(self):\n pass\n\n @abc.abstractmethod\n def assert_nontrivial_match(self):\n pass\n\n @abc.abstractmethod\n def run_restore_ops(self, session=None):\n pass\n\n @abc.abstractmethod\n def initialize_or_restore(self, session=None):\n pass\n\n def expect_partial(self):\n return self", + "docstring": "Abstract base for load status callbacks.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "ClassDef name:_LoadStatus FunctionDef name:assert_consumed arg:self arguments arg FunctionDef name:assert_existing_objects_matched arg:self arguments arg FunctionDef name:assert_nontrivial_match arg:self arguments arg FunctionDef name:run_restore_ops arg:self arg:session arguments arg arg FunctionDef name:initialize_or_restore arg:self arg:session arguments arg arg FunctionDef name:expect_partial arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "RgbToLuv", + "source_code": "class RgbToLuv(Module):\n ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n def forward(self, image: torch.Tensor) -> torch.Tensor:\n return rgb_to_luv(image)", + "docstring": "Convert an image from RGB to Luv. The image data is assumed to be in the range of :math:. Luv color is computed using the D65 illuminant and Observer 2. Returns: Luv version of the image. 
Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> luv = RgbToLuv() >>> output = luv(input) # 2x3x4x5 Reference: [1] [2] [3]", + "type": "class", + "file_path": "kornia\\kornia\\color\\luv.py", + "ast_data": "ClassDef name:RgbToLuv FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_tf_core_flatten", + "source_code": "def _tf_core_flatten(structure, expand_composites=False):\n if structure is None:\n return [None]\n expand_composites = bool(expand_composites)\n return _pywrap_utils.Flatten(structure, expand_composites)", + "docstring": "See comments for flatten() in tensorflow/python/util/nest.py.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py", + "ast_data": "FunctionDef name:_tf_core_flatten arg:structure arg:expand_composites arguments arg arg If Compare Return return:no Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_validate_arguments", + "source_code": "def _validate_arguments(num_mel_bins, sample_rate, lower_edge_hertz, upper_edge_hertz, dtype):\n if num_mel_bins <= 0:\n raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)\n if lower_edge_hertz < 0.0:\n raise ValueError('lower_edge_hertz must be non-negative. Got: %s' % lower_edge_hertz)\n if lower_edge_hertz >= upper_edge_hertz:\n raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' % (lower_edge_hertz, upper_edge_hertz))\n if not isinstance(sample_rate, tensor.Tensor):\n if sample_rate <= 0.0:\n raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)\n if upper_edge_hertz > sample_rate / 2:\n raise ValueError('upper_edge_hertz must not be larger than the Nyquist frequency (sample_rate / 2). Got %s for sample_rate: %s' % (upper_edge_hertz, sample_rate))\n if not dtype.is_floating:\n raise ValueError('dtype must be a floating point type. Got: %s' % dtype)", + "docstring": "Checks the inputs to linear_to_mel_weight_matrix.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\mel_ops.py", + "ast_data": "FunctionDef name:_validate_arguments arg:num_mel_bins arg:sample_rate arg:lower_edge_hertz arg:upper_edge_hertz arg:dtype arguments arg arg arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call If Call If Compare Raise Call If Compare Raise Call If Raise Call" + }, + { + "library": "pytorch", + "name": "__call__", + "source_code": "def __call__(self, x):\n if self._cache_size == 0:\n return self._call(x)\n x_old, y_old = self._cached_x_y\n if x is x_old:\n return y_old\n y = self._call(x)\n self._cached_x_y = (x, y)\n return y", + "docstring": "Computes the transform .", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\transforms.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg If Compare Return return:yes Call Assign If Compare Return return:yes Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_in_control_flow", + "source_code": "def _is_in_control_flow(self, op):\n return control_flow_util.IsInCond(op)", + "docstring": "Returns true if the given op is inside a tf.cond or in tf.while_loop. Args: op: A tensorflow op that should be checked whether in control flow or not. 
Returns: A boolean value whether the op is in control flow or not.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_is_in_control_flow arg:self arg:op arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "edges", + "source_code": "@property\ndef edges(self):\n return self._edges", + "docstring": "The default value of for newly added cells using . Notes ----- This setting does currently only affect newly created cells using . To change existing cells, you have to set their edges explicitly:: for c in tab.get_celld().values(): c.visible_edges = 'horizontal'", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:edges arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "preprocess", + "source_code": "def preprocess(self):\n if not self.is_templatized:\n return\n with open(self.path, encoding='utf-8') as fp:\n src_data = fp.read()\n if self.domain == 'django':\n content = templatize(src_data, origin=self.path[2:])\n with open(self.work_path, 'w', encoding='utf-8') as fp:\n fp.write(content)", + "docstring": "Preprocess (if necessary) a translatable file before passing it to xgettext GNU gettext utility.", + "type": "method", + "file_path": "django\\django\\core\\management\\commands\\makemessages.py", + "ast_data": "FunctionDef name:preprocess arg:self arguments arg If Return return:no With Call Assign Call If Compare Assign Call With Call Call" + }, + { + "library": "tensorflow", + "name": "save_as_bf16", + "source_code": "@save_as_bf16.setter\ndef save_as_bf16(self, save_as_bf16):\n self._save_as_bf16 = save_as_bf16 and self.dtype == dtypes.float32", + "docstring": "Enables saving float32 as bfloat16.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_variable.py", + "ast_data": "FunctionDef name:save_as_bf16 arg:self arg:save_as_bf16 arguments arg arg Assign BoolOp Compare" + }, + { + "library": "tensorflow", + "name": "request_stop", + "source_code": "def request_stop(self):\n self._stop_requested = True", + "docstring": "Sets stop requested field. Hooks can use this function to request stop of iterations. checks whether this is called or not.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py", + "ast_data": "FunctionDef name:request_stop arg:self arguments arg Assign" + }, + { + "library": "pytorch", + "name": "mark_mixed_dtype_allowed_computation_ops", + "source_code": "def mark_mixed_dtype_allowed_computation_ops(gm):\n for target in [aten.convolution.default, aten.addmm.default, aten.mm.default]:\n for node in gm.graph.find_nodes(op='call_function', target=target):\n mark_mixed_dtype(node)", + "docstring": "Mark convolutions/linear which we will binary fold even with mixed precision constants. 
We constant fold in the higher precision for better accuracy and then recover the original precision after.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\binary_folding.py", + "ast_data": "FunctionDef name:mark_mixed_dtype_allowed_computation_ops arg:gm arguments arg For For Call Call" + }, + { + "library": "django", + "name": "datetime_extract_sql", + "source_code": "def datetime_extract_sql(self, lookup_type, sql, params, tzname):\n raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')", + "docstring": "Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that extracts a value from the given datetime field field_name.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:datetime_extract_sql arg:self arg:lookup_type arg:sql arg:params arg:tzname arguments arg arg arg arg arg Raise Call" + }, + { + "library": "django", + "name": "BaseYearArchiveView", + "source_code": "class BaseYearArchiveView(YearMixin, BaseDateListView):\n date_list_period = 'month'\n make_object_list = False\n\n def get_dated_items(self):\n year = self.get_year()\n date_field = self.get_date_field()\n date = _date_from_string(year, self.get_year_format())\n since = self._make_date_lookup_arg(date)\n until = self._make_date_lookup_arg(self._get_next_year(date))\n lookup_kwargs = {'%s__gte' % date_field: since, '%s__lt' % date_field: until}\n qs = self.get_dated_queryset(**lookup_kwargs)\n date_list = self.get_date_list(qs)\n if not self.get_make_object_list():\n qs = qs.none()\n return (date_list, qs, {'year': date, 'next_year': self.get_next_year(date), 'previous_year': self.get_previous_year(date)})\n\n def get_make_object_list(self):\n return self.make_object_list", + "docstring": "Base view for a list of objects published in a given year. This requires subclassing to provide a response mixin.", + "type": "class", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "ClassDef name:BaseYearArchiveView Assign Assign FunctionDef name:get_dated_items arg:self arguments arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Assign Call Assign Call If Call Assign Call Return return:yes Call Call FunctionDef name:get_make_object_list arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "needleman_wunsch", + "source_code": "def needleman_wunsch(self, scores: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(scores, ['B', 'N', 'M'])\n b, n, m = scores.shape\n gap = 0.1\n nw_scores = scores - gap\n dev = scores.device\n nw_grid = torch.zeros(b, n + 1, m + 1, dtype=torch.float, device=dev)\n for i in range(n):\n for j in range(m):\n nw_grid[:, i + 1, j + 1] = torch.maximum(torch.maximum(nw_grid[:, i + 1, j], nw_grid[:, i, j + 1]), nw_grid[:, i, j] + nw_scores[:, i, j])\n return nw_grid[:, -1, -1]", + "docstring": "Batched implementation of the Needleman-Wunsch algorithm. The cost of the InDel operation is set to 0 by subtracting the gap penalty to the scores. 
Args: scores: a (B, N, M) Tensor containing the pairwise scores of the elements to match.", + "type": "method", + "file_path": "kornia\\kornia\\feature\\sold2\\sold2.py", + "ast_data": "FunctionDef name:needleman_wunsch arg:self arg:scores arguments arg arg Call Assign Assign Assign Assign Assign Call For Call For Call Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_repr_data_resource_", + "source_code": "@final\ndef _repr_data_resource_(self):\n if config.get_option('display.html.table_schema'):\n data = self.head(config.get_option('display.max_rows'))\n as_json = data.to_json(orient='table')\n as_json = cast(str, as_json)\n return loads(as_json, object_pairs_hook=collections.OrderedDict)", + "docstring": "Not a real Jupyter special repr method, but we use the same naming convention.", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:_repr_data_resource_ arg:self arguments arg If Call Assign Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "imag", + "source_code": "@tf_export('math.imag', v1=['math.imag', 'imag'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('imag')\ndef imag(input, name=None):\n with ops.name_scope(name, 'Imag', [input]) as name:\n input = ops.convert_to_tensor(input, name='input')\n if input.dtype.is_complex:\n return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.zeros_like(input)", + "docstring": "Returns the imaginary part of a complex (or real) tensor. Given a tensor , this operation returns a tensor of type that is the imaginary part of each element in considered as a complex number. If is real, a tensor of all zeros is returned. For example: Args: input: A . Must be one of the following types: , , , . name: A name for the operation (optional). Returns: A of type or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:imag arg:input arg:name arguments arg arg With Call Assign Call If Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "na_accum_func", + "source_code": "def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:\n mask_a, mask_b = {np.cumprod: (1.0, np.nan), np.maximum.accumulate: (-np.inf, np.nan), np.cumsum: (0.0, np.nan), np.minimum.accumulate: (np.inf, np.nan)}[accum_func]\n assert values.dtype.kind not in 'mM'\n if skipna and (not issubclass(values.dtype.type, (np.integer, np.bool_))):\n vals = values.copy()\n mask = isna(vals)\n vals[mask] = mask_a\n result = accum_func(vals, axis=0)\n result[mask] = mask_b\n else:\n result = accum_func(values, axis=0)\n return result", + "docstring": "Cumulative function with skipna support. 
Parameters ---------- values : np.ndarray or ExtensionArray accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate} skipna : bool Returns ------- np.ndarray or ExtensionArray", + "type": "function", + "file_path": "pandas\\pandas\\core\\nanops.py", + "ast_data": "FunctionDef name:na_accum_func arg:values arg:accum_func arguments arg arg arg Assign Compare If BoolOp Call Assign Call Assign Call Assign Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "validate_dataset_input", + "source_code": "def validate_dataset_input(x, y, sample_weight, validation_split=None):\n if y is not None:\n raise ValueError('You passed a dataset or dataset iterator (%s) as input `x` to your model. In that case, you should not specify a target (`y`) argument, since the dataset or dataset iterator generates both input data and target data. Received: %s' % (x, y))\n if sample_weight is not None:\n raise ValueError('`sample_weight` argument is not supported when input `x` is a dataset or a dataset iterator. Instead, youcan provide sample_weight as the third element of yourdataset, i.e. (inputs, targets, sample_weight). Received: x=%s, sample_weight=%s' % (x, sample_weight))\n if validation_split is not None and validation_split != 0.0:\n raise ValueError('`validation_split` argument is not supported when input `x` is a dataset or a dataset iterator. Received: x=%s, validation_split=%f' % (x, validation_split))", + "docstring": "Validates user input arguments when a dataset iterator is passed. Args: x: Input data. A dataset or iterator. y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s). Expected to be when is a dataset iterator. sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in . Expected to be when is a dataset iterator validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. Expected to be when is a dataset iterator. 
Raises: ValueError: if argument or or are provided by user.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:validate_dataset_input arg:x arg:y arg:sample_weight arg:validation_split arguments arg arg arg arg If Compare Raise Call If Compare Raise Call If BoolOp Compare Compare Raise Call" + }, + { + "library": "pandas", + "name": "_get_join_target", + "source_code": "@final\ndef _get_join_target(self) -> np.ndarray:\n if isinstance(self._values, BaseMaskedArray):\n return self._values._data\n elif isinstance(self._values, ArrowExtensionArray):\n return self._values.to_numpy()\n target = self._get_engine_target()\n if not isinstance(target, np.ndarray):\n raise ValueError('_can_use_libjoin should return False.')\n return target", + "docstring": "Get the ndarray or ExtensionArray that we can pass to the join functions.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_get_join_target arg:self arguments arg If Call Return return:yes If Call Return return:yes Call Assign Call If Call Raise Call Return return:yes" + }, + { + "library": "pandas", + "name": "check_case_sensitive", + "source_code": "def check_case_sensitive(self, name: str, schema: str | None) -> None:\n if not name.isdigit() and (not name.islower()):\n from sqlalchemy import inspect as sqlalchemy_inspect\n insp = sqlalchemy_inspect(self.con)\n table_names = insp.get_table_names(schema=schema or self.meta.schema)\n if name not in table_names:\n msg = f\"The provided table name '{name}' is not found exactly as such in the database after writing the table, possibly due to case sensitivity issues. Consider using lower case table names.\"\n warnings.warn(msg, UserWarning, stacklevel=find_stack_level())", + "docstring": "Checks table name for issues with case-sensitivity. 
Method is called after data is inserted.", + "type": "method", + "file_path": "pandas\\pandas\\io\\sql.py", + "ast_data": "FunctionDef name:check_case_sensitive arg:self arg:name arg:schema arguments arg arg arg If BoolOp Call Call Assign Call Assign Call BoolOp If Compare Assign Call Call" + }, + { + "library": "django", + "name": "__str__", + "source_code": "def __str__(self):\n return str(self.tuple)", + "docstring": "Return the string representation of the coordinate sequence.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py", + "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "StackTraceFilter", + "source_code": "class StackTraceFilter(StackTraceTransform):\n _stack_dict = _source_filter_stacks\n\n def __init__(self):\n self.internal_set = _tf_stack.PyBindFileSet()\n\n def update(self):\n self.internal_set.update_to(set(self.get_filtered_filenames()))\n\n def get_filtered_filenames(self):\n raise NotImplementedError('subclasses need to override this')", + "docstring": "Allows filtering traceback information by removing superfluous frames.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_stack.py", + "ast_data": "ClassDef name:StackTraceFilter Assign FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:update arg:self arguments arg Call Call Call FunctionDef name:get_filtered_filenames arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, wait_until_step):\n self._wait_until_step = wait_until_step", + "docstring": "Initializes a . Args: wait_until_step: an shows until which global step should we wait.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:wait_until_step arguments arg arg Assign" + }, + { + "library": "scipy", + "name": "cheby2", + "source_code": "def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None):\n return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, output=output, ftype='cheby2', fs=fs)", + "docstring": "Chebyshev type II digital and analog filter design. Design an Nth-order digital or analog Chebyshev type II filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. rs : float The minimum attenuation required in the stop band. Specified in decibels, as a positive number. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For Type II filters, this is the point in the transition band at which the gain first reaches -. For digital filters, are in the same units as . By default, is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. ( is thus in half-cycles / sample.) For analog filters, is an angular frequency (e.g., rad/s). btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba' for backwards compatibility, but 'sos' should be used for general-purpose filtering. fs : float, optional The sampling frequency of the digital system. .. 
versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator () and denominator () polynomials of the IIR filter. Only returned if `cheby1`) format): >>> sos = signal.cheby2(12, 20, 17, 'hp', fs=1000, output='sos') >>> filtered = signal.sosfilt(sos, sig) >>> ax2.plot(t, filtered) >>> ax2.set_title('After 17 Hz high-pass filter') >>> ax2.axis([0, 1, -2, 2]) >>> ax2.set_xlabel('Time [s]') >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_filter_design.py", + "ast_data": "FunctionDef name:cheby2 arg:N arg:rs arg:Wn arg:btype arg:analog arg:output arg:fs arguments arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "check_value_size", + "source_code": "def check_value_size(value, mask: npt.NDArray[np.bool_], length: int):\n if is_array_like(value):\n if len(value) != length:\n raise ValueError(f\"Length of 'value' does not match. Got ({len(value)}) expected {length}\")\n value = value[mask]\n return value", + "docstring": "Validate the size of the values passed to ExtensionArray.fillna.", + "type": "function", + "file_path": "pandas\\pandas\\core\\missing.py", + "ast_data": "FunctionDef name:check_value_size arg:value arg:mask arg:length arguments arg arg arg If Call If Compare Call Raise Call Call Assign Return return:yes" + }, + { + "library": "django", + "name": "pack", + "source_code": "def pack(structure, data):\n return struct.pack('<' + structure, *data)", + "docstring": "Pack data into hex string with little endian format.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\pgraster.py", + "ast_data": "FunctionDef name:pack arg:structure arg:data arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "Replicate", + "source_code": "@dataclass(frozen=True)\nclass Replicate(Placement):\n\n def __eq__(self, other: object) -> bool:\n return isinstance(other, Replicate)\n\n def __hash__(self) -> int:\n return -1\n\n def __repr__(self) -> str:\n return 'Replicate()'\n\n def __str__(self) -> str:\n return 'R'\n\n def _replicate_tensor(self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int, src_data_rank: Optional[int]=0) -> torch.Tensor:\n my_coordinate = mesh.get_coordinate()\n if my_coordinate is None:\n return tensor.new_empty(0, requires_grad=tensor.requires_grad)\n tensor = tensor.contiguous()\n if src_data_rank is not None:\n mesh_broadcast(tensor, mesh, mesh_dim=mesh_dim, group_src=src_data_rank)\n return tensor", + "docstring": "The ``, etc.)", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py", + "ast_data": "ClassDef name:Replicate FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:__hash__ arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:_replicate_tensor arg:self arg:tensor arg:mesh arg:mesh_dim arg:src_data_rank arguments arg arg arg arg arg Assign Call If Compare Return return:yes Call Assign Call If Compare Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_top_partitions", + "source_code": "def get_top_partitions(partitions: list[Partition]) -> list[Partition]:\n top_partitions = [partition for partition in partitions if len(partition.parents) == 0]\n return top_partitions", + "docstring": "This function is to return all the partitions without parents as the starting 
points of all the paths", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py", + "ast_data": "FunctionDef name:get_top_partitions arg:partitions arguments arg Assign Compare Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_group_tag", + "source_code": "def _get_group_tag(pg: ProcessGroup) -> str:\n tag = _world.pg_to_tag[pg]\n tag = tag.removeprefix('user:')\n return tag", + "docstring": "Return the tag associated with ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:_get_group_tag arg:pg arguments arg Assign Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "write_data_chunk", + "source_code": "def write_data_chunk(self, rows: np.ndarray, indexes: list[np.ndarray], mask: npt.NDArray[np.bool_] | None, values: list[np.ndarray]) -> None:\n for v in values:\n if not np.prod(v.shape):\n return\n nrows = indexes[0].shape[0]\n if nrows != len(rows):\n rows = np.empty(nrows, dtype=self.dtype)\n names = self.dtype.names\n nindexes = len(indexes)\n for i, idx in enumerate(indexes):\n rows[names[i]] = idx\n for i, v in enumerate(values):\n rows[names[i + nindexes]] = v\n if mask is not None:\n m = ~mask.ravel().astype(bool, copy=False)\n if not m.all():\n rows = rows[m]\n if len(rows):\n self.table.append(rows)\n self.table.flush()", + "docstring": "Parameters ---------- rows : an empty memory space where we are putting the chunk indexes : an array of the indexes mask : an array of the masks values : an array of the values", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:write_data_chunk arg:self arg:rows arg:indexes arg:mask arg:values arguments arg arg arg arg arg For If Call Return return:no Assign If Compare Call Assign Call Assign Assign Call For Call Assign For Call Assign If Compare Assign Call Call If Call Assign If Call Call Call" + }, + { + "library": "tensorflow", + "name": "stack", + "source_code": "def stack(context=1):\n return _inspect.stack(context)[1:]", + "docstring": "TFDecorator-aware replacement for inspect.stack.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:stack arg:context arguments arg Return return:yes Call" + }, + { + "library": "pygame", + "name": "get_top_layer", + "source_code": "def get_top_layer(self):\n return self._spritelayers[self._spritelist[-1]]", + "docstring": "return the top layer LayeredUpdates.get_top_layer(): return layer", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:get_top_layer arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "conditional_escape", + "source_code": "def conditional_escape(text):\n if isinstance(text, Promise):\n text = str(text)\n if hasattr(text, '__html__'):\n return text.__html__()\n else:\n return escape(text)", + "docstring": "Similar to escape(), except that it doesn't operate on pre-escaped strings. 
This function relies on the __html__ convention used both by Django's SafeData class and by third-party libraries like markupsafe.", + "type": "function", + "file_path": "django\\django\\utils\\html.py", + "ast_data": "FunctionDef name:conditional_escape arg:text arguments arg If Call Assign Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "replace", + "source_code": "def replace(self, old, new, count=None):\n return replace(self, old, new, count if count is not None else -1)", + "docstring": "For each element in , return a copy of the string with all occurrences of substring replaced by . See Also -------- char.replace", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:replace arg:self arg:old arg:new arg:count arguments arg arg arg arg Return return:yes Call Compare" + }, + { + "library": "pandas", + "name": "AttributeConflictWarning", + "source_code": "class AttributeConflictWarning(Warning):\n pass", + "docstring": "Warning raised when index attributes conflict when using HDFStore. Occurs when attempting to append an index with a different name than the existing index on an HDFStore or attempting to append an index with a different frequency than the existing index on an HDFStore. See Also -------- HDFStore : Dict-like IO interface for storing pandas objects in PyTables. DataFrame.to_hdf : Write the contained data to an HDF5 file using HDFStore. read_hdf : Read from an HDF5 file into a DataFrame. Examples -------- >>> idx1 = pd.Index([\"a\", \"b\"], name=\"name1\") >>> df1 = pd.DataFrame([[1, 2], [3, 4]], index=idx1) >>> df1.to_hdf(\"file\", \"data\", \"w\", append=True) # doctest: +SKIP >>> idx2 = pd.Index([\"c\", \"d\"], name=\"name2\") >>> df2 = pd.DataFrame([[5, 6], [7, 8]], index=idx2) >>> df2.to_hdf(\"file\", \"data\", \"a\", append=True) # doctest: +SKIP AttributeConflictWarning: the [index_name] attribute of the existing index is [name1] which conflicts with the new [name2]...", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:AttributeConflictWarning" + }, + { + "library": "pandas", + "name": "as_unit", + "source_code": "def as_unit(self, unit: str, round_ok: bool=True) -> Self:\n if unit not in ['s', 'ms', 'us', 'ns']:\n raise ValueError(\"Supported units are 's', 'ms', 'us', 'ns'\")\n dtype = np.dtype(f'{self.dtype.kind}8[{unit}]')\n new_values = astype_overflowsafe(self._ndarray, dtype, round_ok=round_ok)\n if isinstance(self.dtype, np.dtype):\n new_dtype = new_values.dtype\n else:\n tz = cast('DatetimeArray', self).tz\n new_dtype = DatetimeTZDtype(tz=tz, unit=unit)\n return type(self)._simple_new(new_values, dtype=new_dtype, freq=self.freq)", + "docstring": "Convert to a dtype with the given unit resolution. The limits of timestamp representation depend on the chosen resolution. Different resolutions can be converted to each other through as_unit. Parameters ---------- unit : {'s', 'ms', 'us', 'ns'} round_ok : bool, default True If False and the conversion requires rounding, raise ValueError. Returns ------- same type as self Converted to the specified unit. See Also -------- Timestamp.as_unit : Convert to the given unit. 
Examples -------- For :class:: >>> idx = pd.DatetimeIndex([\"2020-01-02 01:02:03.004005006\"]) >>> idx DatetimeIndex(['2020-01-02 01:02:03.004005006'], dtype='datetime64[ns]', freq=None) >>> idx.as_unit(\"s\") DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None) For :class:: >>> tdelta_idx = pd.to_timedelta([\"1 day 3 min 2 us 42 ns\"]) >>> tdelta_idx TimedeltaIndex(['1 days 00:03:00.000002042'], dtype='timedelta64[ns]', freq=None) >>> tdelta_idx.as_unit(\"s\") TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:as_unit arg:self arg:unit arg:round_ok arguments arg arg arg If Compare Raise Call Assign Call Assign Call If Call Assign Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "__sklearn_tags__", + "source_code": "def __sklearn_tags__(self):\n tags = super().__sklearn_tags__()\n tags.input_tags.pairwise = get_tags(self.estimator).input_tags.pairwise\n tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse\n return tags", + "docstring": "Indicate if wrapped estimator is using a precomputed Gram matrix", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multiclass.py", + "ast_data": "FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "serve", + "source_code": "def serve(path=localFile, port=8080, root=None):\n if coverage is None:\n raise ImportError('The coverage module could not be imported.')\n from coverage import coverage\n cov = coverage(data_file=path)\n cov.load()\n cherrypy.config.update({'server.socket_port': int(port), 'server.thread_pool': 10, 'environment': 'production'})\n cherrypy.quickstart(CoverStats(cov, root))", + "docstring": "Serve the coverage app over HTTP.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\covercp.py", + "ast_data": "FunctionDef name:serve arg:path arg:port arg:root arguments arg arg arg If Compare Raise Call Assign Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "Hunk", + "source_code": "@dataclasses.dataclass(frozen=True)\nclass Hunk:\n file: str\n start: int\n length: int\n lines: list[str]\n\n def added_lines(self) -> Generator[tuple[int, str], None, None]:\n current_line_no = self.start\n for line in self.lines:\n if line.startswith('+'):\n yield (current_line_no, line[1:])\n current_line_no += 1\n elif line.startswith('-'):\n continue\n else:\n current_line_no += 1", + "docstring": "Represents a hunk of a diff.", + "type": "class", + "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\diff_parser.py", + "ast_data": "ClassDef name:Hunk FunctionDef name:added_lines arg:self arguments arg Assign For If Call If Call Call" + }, + { + "library": "tensorflow", + "name": "getdoc", + "source_code": "def getdoc(object):\n return _inspect.getdoc(object)", + "docstring": "TFDecorator-aware replacement for inspect.getdoc. Args: object: An object, possibly decorated. Returns: The docstring associated with the object. 
The outermost-decorated object is intended to have the most complete documentation, so the decorated parameter is not unwrapped.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:getdoc arg:object arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_transform_general", + "source_code": "def _transform_general(self, func: Callable, engine, engine_kwargs, *args, **kwargs) -> Series:\n if maybe_use_numba(engine):\n return self._transform_with_numba(func, *args, engine_kwargs=engine_kwargs, **kwargs)\n assert callable(func)\n klass = type(self.obj)\n results = []\n for name, group in self._grouper.get_iterator(self._obj_with_exclusions):\n object.__setattr__(group, 'name', name)\n res = func(group, *args, **kwargs)\n results.append(klass(res, index=group.index))\n if results:\n from pandas.core.reshape.concat import concat\n concatenated = concat(results, ignore_index=True)\n result = self._set_result_index_ordered(concatenated)\n else:\n result = self.obj._constructor(dtype=np.float64)\n result.name = self.obj.name\n return result", + "docstring": "Transform with a callable .", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\generic.py", + "ast_data": "FunctionDef name:_transform_general arg:self arg:func arg:engine arg:engine_kwargs arguments arg arg arg arg arg arg If Call Return return:yes Call Call Assign Call Assign For Call Call Assign Call Call Call If Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "impl_save_for_backward", + "source_code": "def impl_save_for_backward(qualname, *, func=None):\n\n def inner(func):\n custom_op = _find_custom_op(qualname, also_check_torch_library=True)\n custom_op.impl_save_for_backward(_stacklevel=3)(func)\n return func\n if func is None:\n return inner\n return inner(func)", + "docstring": "Register a function that tells us what to save for backward. Please see :func: for more details.", + "type": "function", + "file_path": "pytorch\\torch\\_custom_ops.py", + "ast_data": "FunctionDef name:impl_save_for_backward arg:qualname arguments arg arg FunctionDef name:inner arg:func arguments arg Assign Call Call Call Return return:yes If Compare Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "pixel_count", + "source_code": "@property\ndef pixel_count(self):\n return self.width * self.height", + "docstring": "Return the total number of pixels in this band.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py", + "ast_data": "FunctionDef name:pixel_count arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__tf_tensor__", + "source_code": "def __tf_tensor__(self, dtype=None, name=None):\n pass", + "docstring": "Converts this object to a Tensor. 
Args: dtype: data type for the returned Tensor name: a name for the operations which create the Tensor Returns: A Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\types\\core.py", + "ast_data": "FunctionDef name:__tf_tensor__ arg:self arg:dtype arg:name arguments arg arg arg" + }, + { + "library": "scrapy", + "name": "string_camelcase", + "source_code": "def string_camelcase(string: str) -> str:\n return CAMELCASE_INVALID_CHARS.sub('', string.title())", + "docstring": "Convert a word to its CamelCase version and remove invalid chars >>> string_camelcase('lost-pound') 'LostPound' >>> string_camelcase('missing_images') 'MissingImages'", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\template.py", + "ast_data": "FunctionDef name:string_camelcase arg:string arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "reset_accumulated_memory_stats", + "source_code": "def reset_accumulated_memory_stats(device: 'Device'=None) -> None:\n device = _get_device_index(device, optional=True)\n return torch._C._cuda_resetAccumulatedMemoryStats(device)", + "docstring": "Reset the \"accumulated\" (historical) stats tracked by the CUDA memory allocator. See :func: for details. Accumulated stats correspond to the and keys in each individual stat dict, as well as and . Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `cuda-memory-management` for more details about GPU memory management.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\memory.py", + "ast_data": "FunctionDef name:reset_accumulated_memory_stats arg:device arguments arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "LazyBatchNorm2d", + "source_code": "class LazyBatchNorm2d(_LazyNormBase, _BatchNorm):\n cls_to_become = BatchNorm2d\n\n def _check_input_dim(self, input):\n if input.dim() != 4:\n raise ValueError(f'expected 4D input (got {input.dim()}D input)')", + "docstring": "A :class: module with lazy initialization. Lazy initialization is done for the `BatchNorm2dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinrunning_meanrunning_var`", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\batchnorm.py", + "ast_data": "ClassDef name:LazyBatchNorm2d Assign FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "register_function_func", + "source_code": "def register_function_func(ops):\n\n def wrapper(func):\n for op in ops:\n _MASKEDTENSOR_FUNCTION_TABLE[op] = partial(func, op)\n return wrapper", + "docstring": "Used for registering a new __torch_function__ function to MaskedTensor Called via _MASKEDTENSOR_FUNCTION_TABLE The code to register a new function looks like: @register_function_func(list_of_ops) def foo(func, *args, **kwargs):", + "type": "function", + "file_path": "pytorch\\torch\\masked\\maskedtensor\\_ops_refs.py", + "ast_data": "FunctionDef name:register_function_func arg:ops arguments arg FunctionDef name:wrapper arg:func arguments arg For Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__str__", + "source_code": "def __str__(self) -> str:\n self.real_recompile()\n return super().__str__()", + "docstring": "str(GraphModule) will access the _code attribute. 
Make sure recompile happens so _code attribute is available.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\_lazy_graph_module.py", + "ast_data": "FunctionDef name:__str__ arg:self arguments arg Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "GradNotSetToNonePattern", + "source_code": "class GradNotSetToNonePattern(Pattern):\n\n def __init__(self, prof: profile, should_benchmark: bool=False):\n super().__init__(prof, should_benchmark)\n self.name = 'Gradient Set To Zero Instead of None Pattern'\n self.description = \"Detected gradient set to zero instead of None. Please add 'set_to_none=True' when calling zero_grad().\"\n self.url = 'https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-gradient-calculation-for-validation-or-inference'\n\n def match(self, event: _ProfilerEvent):\n if not event.name.endswith(': zero_grad'):\n return False\n if not event.children:\n return False\n for sub_event in traverse_dfs(event.children):\n if sub_event.name == 'aten::zero_' and sub_event.parent.name != 'aten::zeros':\n return True\n return False", + "docstring": "This pattern identifies if we are not setting grad to None in zero_grad. example: optimizer.zero_grad() By setting set_to_none=True, we can gain speedup Pattern: XXXXX: _zero_grad NOT aten::zeros aten::zero_ aten::zero_ is called on each parameter in the model. We also want to make sure it is not called by aten::zeros. Algorithm: String match", + "type": "class", + "file_path": "pytorch\\torch\\profiler\\_pattern_matcher.py", + "ast_data": "ClassDef name:GradNotSetToNonePattern FunctionDef name:__init__ arg:self arg:prof arg:should_benchmark arguments arg arg arg Call Call Assign Assign Assign FunctionDef name:match arg:self arg:event arguments arg arg If Call Return return:yes If Return return:yes For Call If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "_jackknife_resample", + "source_code": "def _jackknife_resample(sample, batch=None):\n n = sample.shape[-1]\n batch_nominal = batch or n\n for k in range(0, n, batch_nominal):\n batch_actual = min(batch_nominal, n - k)\n j = np.ones((batch_actual, n), dtype=bool)\n np.fill_diagonal(j[:, k:k + batch_actual], False)\n i = np.arange(n)\n i = np.broadcast_to(i, (batch_actual, n))\n i = i[j].reshape((batch_actual, n - 1))\n resamples = sample[..., i]\n yield resamples", + "docstring": "Jackknife resample the sample. Only one-sample stats for now.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_resampling.py", + "ast_data": "FunctionDef name:_jackknife_resample arg:sample arg:batch arguments arg arg Assign Assign BoolOp For Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign" + }, + { + "library": "pandas", + "name": "_multi_take", + "source_code": "def _multi_take(self, tup: tuple):\n d = {axis: self._get_listlike_indexer(key, axis) for key, axis in zip(tup, self.obj._AXIS_ORDERS)}\n return self.obj._reindex_with_indexers(d, allow_dups=True)", + "docstring": "Create the indexers for the passed tuple of keys, and executes the take operation. This allows the take operation to be executed all at once, rather than once for each dimension. Improving efficiency. Parameters ---------- tup : tuple Tuple of indexers, one per axis. 
Returns ------- values: same type as the object being indexed", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:_multi_take arg:self arg:tup arguments arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "get_region", + "source_code": "def get_region(z):\n if z == 1 + 0j:\n return 0\n elif abs(z) < 0.9 and z.real >= 0:\n return 1\n elif abs(z) <= 1 and z.real < 0:\n return 2\n elif 0.9 <= abs(z) <= 1 and abs(1 - z) < 0.9:\n return 3\n elif 0.9 <= abs(z) <= 1 and abs(1 - z) >= 0.9:\n return 4\n elif 1 < abs(z) < 1.1 and abs(1 - z) >= 0.9 and (z.real >= 0):\n return 5\n else:\n return 6", + "docstring": "Assign numbers for regions where hyp2f1 must be handled differently.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_precompute\\hyp2f1_data.py", + "ast_data": "FunctionDef name:get_region arg:z arguments arg If Compare Return return:yes If BoolOp Compare Call Compare Return return:yes If BoolOp Compare Call Compare Return return:yes If BoolOp Compare Call Compare Call Return return:yes If BoolOp Compare Call Compare Call Return return:yes If BoolOp Compare Call Compare Call Compare Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_params", + "source_code": "def set_params(self, **kwargs):\n _api.warn_external(\"'set_params()' not defined for locator of type \" + str(type(self)))", + "docstring": "Do nothing, and raise a warning. Any locator class not supporting the set_params() function will call this.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:set_params arg:self arguments arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "device", + "source_code": "@property\ndef device(self):\n return self._vars[0].device", + "docstring": "The device this variable is on.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py", + "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "null_count", + "source_code": "@cache_readonly\ndef null_count(self) -> int:\n return self._col.isna().sum().item()", + "docstring": "Number of null elements. 
Should always be known.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\column.py", + "ast_data": "FunctionDef name:null_count arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "_set_active_handle", + "source_code": "def _set_active_handle(self, event):\n c_idx, c_dist = self._corner_handles.closest(event.x, event.y)\n e_idx, e_dist = self._edge_handles.closest(event.x, event.y)\n m_idx, m_dist = self._center_handle.closest(event.x, event.y)\n if 'move' in self._state:\n self._active_handle = 'C'\n elif m_dist < self.grab_range * 2:\n self._active_handle = 'C'\n elif c_dist > self.grab_range and e_dist > self.grab_range:\n if self.drag_from_anywhere and self._contains(event):\n self._active_handle = 'C'\n else:\n self._active_handle = None\n return\n elif c_dist < e_dist:\n self._active_handle = self._corner_order[c_idx]\n else:\n self._active_handle = self._edge_order[e_idx]", + "docstring": "Set active handle based on the location of the mouse event.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:_set_active_handle arg:self arg:event arguments arg arg Assign Call Assign Call Assign Call If Compare Assign If Compare Assign If BoolOp Compare Compare If BoolOp Call Assign Assign Return return:no If Compare Assign Assign" + }, + { + "library": "matplotlib", + "name": "has_fignum", + "source_code": "@classmethod\ndef has_fignum(cls, num):\n return num in cls.figs", + "docstring": "Return whether figure number *num* exists.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py", + "ast_data": "FunctionDef name:has_fignum arg:cls arg:num arguments arg arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "_nvtx_range_pop", + "source_code": "def _nvtx_range_pop():\n if torch.cuda.is_available():\n torch.cuda.nvtx.range_pop()", + "docstring": "If PyTorch is installed with CUDA support, this terminates NVTX range. Check torch.cuda.nvtx.range_pop's document for more details.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py", + "ast_data": "FunctionDef name:_nvtx_range_pop arguments If Call Call" + }, + { + "library": "tensorflow", + "name": "enable_traceback_filtering", + "source_code": "@tf_export('debugging.enable_traceback_filtering')\ndef enable_traceback_filtering():\n if sys.version_info.major != 3 or sys.version_info.minor < 7:\n raise RuntimeError(f'Traceback filtering is only available with Python 3.7 or higher. This Python version: {sys.version}')\n global _ENABLE_TRACEBACK_FILTERING\n _ENABLE_TRACEBACK_FILTERING.value = True", + "docstring": "Enable filtering out TensorFlow-internal frames in exception stack traces. Raw TensorFlow stack traces involve many internal frames, which can be challenging to read through, while not being actionable for end users. By default, TensorFlow filters internal frames in most exceptions that it raises, to keep stack traces short, readable, and focused on what's actionable for end users (their own code). If you have previously disabled traceback filtering via , you can re-enable it via . 
Raises: RuntimeError: If Python version is not at least 3.7.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\traceback_utils.py", + "ast_data": "FunctionDef name:enable_traceback_filtering arguments If BoolOp Compare Compare Raise Call Assign Call" + }, + { + "library": "pytorch", + "name": "zip_schema", + "source_code": "def zip_schema(schema: _C.FunctionSchema, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Iterable[tuple[_C.Argument, Any]]:\n assert len(schema.arguments) >= len(args) + len(kwargs)\n for i in range(len(schema.arguments)):\n info = schema.arguments[i]\n if info.kwarg_only:\n if info.name in kwargs:\n yield (info, kwargs[info.name])\n continue\n if i >= len(args):\n if not info.kwarg_only and info.name in kwargs:\n yield (info, kwargs[info.name])\n continue\n yield (info, args[i])\n return", + "docstring": "zips schema.arguments and (args, kwargs) together. Assumes that (args, kwargs) were the inputs to some torch._ops.OpOverload: that is, (args, kwargs) must be bindable to the schema (args, kwargs).", + "type": "function", + "file_path": "pytorch\\torch\\_library\\utils.py", + "ast_data": "FunctionDef name:zip_schema arg:schema arg:args arg:kwargs arguments arg arg arg Compare Call Call Call For Call Call Assign If If Compare If Compare Call If BoolOp Compare Return return:no" + }, + { + "library": "numpy", + "name": "clump_masked", + "source_code": "def clump_masked(a):\n mask = ma.getmask(a)\n if mask is nomask:\n return []\n return _ezclump(mask)", + "docstring": "Returns a list of slices corresponding to the masked clumps of a 1-D array. (A \"clump\" is defined as a contiguous region of the array). Parameters ---------- a : ndarray A one-dimensional masked array. Returns ------- slices : list of slice The list of slices, one for each continuous region of masked elements in . See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges notmasked_contiguous, clump_unmasked Examples -------- >>> import numpy as np >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_masked(a) [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:clump_masked arg:a arguments arg Assign Call If Compare Return return:no Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_in_projection_packed", + "source_code": "def _in_projection_packed(q: Tensor, k: Tensor, v: Tensor, w: Tensor, b: Optional[Tensor]=None) -> list[Tensor]:\n E = q.size(-1)\n if k is v:\n if q is k:\n proj = linear(q, w, b)\n proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()\n return (proj[0], proj[1], proj[2])\n else:\n w_q, w_kv = w.split([E, E * 2])\n if b is None:\n b_q = b_kv = None\n else:\n b_q, b_kv = b.split([E, E * 2])\n q_proj = linear(q, w_q, b_q)\n kv_proj = linear(k, w_kv, b_kv)\n kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()\n return (q_proj, kv_proj[0], kv_proj[1])\n else:\n w_q, w_k, w_v = w.chunk(3)\n if b is None:\n b_q = b_k = b_v = None\n else:\n b_q, b_k, b_v = b.chunk(3)\n return (linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v))", + "docstring": "Perform the in-projection step of the attention operation, using packed weights. Output is a triple containing projection tensors for query, key and value. Args: q, k, v: query, key and value tensors to be projected. 
For self-attention, these are typically the same tensor; for encoder-decoder attention, k and v are typically the same tensor. (We take advantage of these identities for performance if they are present.) Regardless, q, k and v must share a common embedding dimension; otherwise their shapes may vary. w: projection weights for q, k and v, packed into a single tensor. Weights are packed along dimension 0, in q, k, v order. b: optional projection biases for q, k and v, packed into a single tensor in q, k, v order. Shape: Inputs: - q: :math: where E is the embedding dimension - k: :math: where E is the embedding dimension - v: :math: where E is the embedding dimension - w: :math: where E is the embedding dimension - b: :math: where E is the embedding dimension Output: - in output list :math:, each output tensor will have the same shape as the corresponding input tensor.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:_in_projection_packed arg:q arg:k arg:v arg:w arg:b arguments arg arg arg arg arg Assign Call If Compare If Compare Assign Call Assign Call Call Call Call Call Return return:yes Assign Call If Compare Assign Assign Call Assign Call Assign Call Assign Call Call Call Call Call Return return:yes Assign Call If Compare Assign Assign Call Return return:yes Call Call Call" + }, + { + "library": "seaborn", + "name": "facet_data", + "source_code": "def facet_data(self):\n data = self.data\n if self.row_names:\n row_masks = [data[self._row_var] == n for n in self.row_names]\n else:\n row_masks = [np.repeat(True, len(self.data))]\n if self.col_names:\n col_masks = [data[self._col_var] == n for n in self.col_names]\n else:\n col_masks = [np.repeat(True, len(self.data))]\n if self.hue_names:\n hue_masks = [data[self._hue_var] == n for n in self.hue_names]\n else:\n hue_masks = [np.repeat(True, len(self.data))]\n for (i, row), (j, col), (k, hue) in product(enumerate(row_masks), enumerate(col_masks), enumerate(hue_masks)):\n data_ijk = data[row & col & hue & self._not_na]\n yield ((i, j, k), data_ijk)", + "docstring": "Generator for name indices and data subsets for each facet. Yields ------ (i, j, k), data_ijk : tuple of ints, DataFrame The ints provide an index into the {row, col, hue}_names attribute, and the dataframe contains a subset of the full data corresponding to each facet. 
The generator yields subsets that correspond with the self.axes.flat iterator, or self.axes[i, j] when is None.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:facet_data arg:self arguments arg Assign If Assign Compare Assign Call Call If Assign Compare Assign Call Call If Assign Compare Assign Call Call For Call Call Call Call Assign" + }, + { + "library": "pytorch", + "name": "load_state_dict", + "source_code": "def load_state_dict(self, state_dict: dict[str, Any]) -> None:\n state = state_dict['state']\n data_groups, defaults = (state_dict['data_groups'], state_dict['defaults'])\n self.__set_state__({'state': state, 'data_groups': data_groups, 'defaults': defaults})", + "docstring": "The load_state_dict() restores the state of the sparsifier based on the state_dict Args: * state_dict - the dictionary that to which the current sparsifier needs to be restored to", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", + "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Assign Assign Call" + }, + { + "library": "pandas", + "name": "std", + "source_code": "@final\n@Substitution(name='groupby')\n@Substitution(see_also=_common_see_also)\ndef std(self, ddof: int=1, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None, numeric_only: bool=False, skipna: bool=True):\n if maybe_use_numba(engine):\n from pandas.core._numba.kernels import grouped_var\n return np.sqrt(self._numba_agg_general(grouped_var, executor.float_dtype_mapping, engine_kwargs, min_periods=0, ddof=ddof, skipna=skipna))\n else:\n return self._cython_agg_general('std', alt=lambda x: Series(x, copy=False).std(ddof=ddof, skipna=skipna), numeric_only=numeric_only, ddof=ddof, skipna=skipna)", + "docstring": "Compute standard deviation of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is `floatintboolean`. skipna : bool, default True Exclude NA/null values. If an entire group is NA, the result will be NA. .. versionadded:: 3.0.0 Returns ------- Series or DataFrame Standard deviation of values within each group. %(see_also)s Examples -------- For SeriesGroupBy: >>> lst = [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\"] >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) >>> ser a 7 a 2 a 8 b 4 b 3 b 3 dtype: int64 >>> ser.groupby(level=0).std() a 3.21455 b 0.57735 dtype: float64 For DataFrameGroupBy: >>> data = {\"a\": [1, 3, 5, 7, 7, 8, 3], \"b\": [1, 4, 8, 4, 4, 2, 1]} >>> df = pd.DataFrame( ... data, index=[\"dog\", \"dog\", \"dog\", \"mouse\", \"mouse\", \"mouse\", \"mouse\"] ... 
) >>> df a b dog 1 1 dog 3 4 dog 5 8 mouse 7 4 mouse 7 4 mouse 8 2 mouse 3 1 >>> df.groupby(level=0).std() a b dog 2.000000 3.511885 mouse 2.217356 1.500000", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:std arg:self arg:ddof arg:engine arg:engine_kwargs arg:numeric_only arg:skipna arguments arg arg arg arg arg arg If Call Return return:yes Call Call Return return:yes Call arguments arg Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "getfullargspec", + "source_code": "def getfullargspec(obj):\n decorators, target = tf_decorator.unwrap(obj)\n for d in decorators:\n if d.decorator_argspec is not None:\n return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)\n return _getfullargspec(target)", + "docstring": "TFDecorator-aware replacement for . This wrapper emulates in[^)]* Python2. Args: obj: A callable, possibly decorated. Returns: The that describes the signature of the outermost decorator that changes the callable's signature. If the callable is not decorated, will be called directly on the callable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:getfullargspec arg:obj arguments arg Assign Call For If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_wrapper", + "source_code": "def _get_wrapper(x, tf_should_use_helper):\n type_x = type(x)\n memoized = _WRAPPERS.get(type_x, None)\n if memoized:\n return memoized(x, tf_should_use_helper)\n tx = copy.deepcopy(ShouldUseWrapper)\n bases = getattr(tx, '__orig_bases__', tx.__bases__)\n\n def set_body(ns):\n ns.update(tx.__dict__)\n return ns\n copy_tx = types.new_class(tx.__name__, bases, exec_body=set_body)\n copy_tx.__init__ = _new__init__\n copy_tx.__getattribute__ = _new__getattribute__\n for op in OVERLOADABLE_OPERATORS:\n if hasattr(type_x, op):\n setattr(copy_tx, op, getattr(type_x, op))\n copy_tx.mark_used = _new_mark_used\n copy_tx.__setattr__ = _new__setattr__\n _WRAPPERS[type_x] = copy_tx\n return copy_tx(x, tf_should_use_helper)", + "docstring": "Create a wrapper for object x, whose class subclasses type(x). The wrapper will emit a warning if it is deleted without any of its properties being accessed or methods being called. Args: x: The instance to wrap. tf_should_use_helper: The object that tracks usage. 
Returns: An object wrapping , of type .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_should_use.py", + "ast_data": "FunctionDef name:_get_wrapper arg:x arg:tf_should_use_helper arguments arg arg Assign Call Assign Call If Return return:yes Call Assign Call Assign Call FunctionDef name:set_body arg:ns arguments arg Call Return return:yes Assign Call Assign Assign For If Call Call Call Assign Assign Assign Return return:yes Call" + }, + { + "library": "kornia", + "name": "apply_transform_mask", + "source_code": "def apply_transform_mask(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n raise NotImplementedError", + "docstring": "Process masks corresponding to the inputs that are transformed.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\base.py", + "ast_data": "FunctionDef name:apply_transform_mask arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Raise" + }, + { + "library": "kornia", + "name": "VerticalFlip", + "source_code": "class VerticalFlip(OperationBase):\n\n def __init__(self, initial_probability: float=0.5, temperature: float=0.1) -> None:\n super().__init__(K.RandomVerticalFlip(same_on_batch=False, p=initial_probability), initial_magnitude=None, temperature=temperature, symmetric_megnitude=False)", + "docstring": "Apply vertical flip operation. Args: initial_magnitude: the initial magnitude. temperature: temperature for RelaxedBernoulli distribution used during training.", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py", + "ast_data": "ClassDef name:VerticalFlip FunctionDef name:__init__ arg:self arg:initial_probability arg:temperature arguments arg arg arg Call Call Call" + }, + { + "library": "scipy", + "name": "_process_parameters", + "source_code": "def _process_parameters(self, dim):\n if dim is None or not np.isscalar(dim) or dim < 0 or (dim != int(dim)):\n raise ValueError('Dimension of rotation must be specified,and must be a scalar nonnegative integer.')\n return dim", + "docstring": "Dimension N must be specified; it cannot be inferred.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_transform_X_ordinal", + "source_code": "def _transform_X_ordinal(self, X_out, X_ordinal, X_unknown_mask, row_indices, encodings, target_mean):\n if self.target_type_ == 'multiclass':\n n_classes = len(self.classes_)\n for e_idx, encoding in enumerate(encodings):\n feat_idx = e_idx // n_classes\n mean_idx = e_idx % n_classes\n X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, feat_idx]]\n X_out[X_unknown_mask[:, feat_idx], e_idx] = target_mean[mean_idx]\n else:\n for e_idx, encoding in enumerate(encodings):\n X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, e_idx]]\n X_out[X_unknown_mask[:, e_idx], e_idx] = target_mean", + "docstring": "Transform X_ordinal using encodings. In the multiclass case, and have column (axis=1) size , while has length of size . 
`feat_idx` deals with this by repeating feature indices by `n_classes`. E.g., for 3 features, 2 classes: 0,0,1,1,2,2 Additionally, `target_mean` is of shape (`n_classes`,) so `mean_idx` cycles through 0 to `n_classes` - 1, `n_features` times.", "type": "method", "file_path": "scikit-learn\\sklearn\\preprocessing\\_target_encoder.py", "ast_data": "FunctionDef name:_transform_X_ordinal arg:self arg:X_out arg:X_ordinal arg:X_unknown_mask arg:row_indices arg:encodings arg:target_mean arguments arg arg arg arg arg arg arg If Compare Assign Call For Call Assign Assign Assign Assign For Call Assign Assign" + }, + { + "library": "sphinx", + "name": "add_linkcode_domain", + "source_code": "def add_linkcode_domain(domain: str, keys: list[str], override: bool=False) -> None:\n if override or domain not in _DOMAIN_KEYS:\n _DOMAIN_KEYS[domain] = list(keys)", + "docstring": "Register a new list of keys to use for a domain. .. versionadded:: 8.2", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\linkcode.py", + "ast_data": "FunctionDef name:add_linkcode_domain arg:domain arg:keys arg:override arguments arg arg arg If BoolOp Compare Assign Call" + }, + { + "library": "matplotlib", + "name": "render_glyph", + "source_code": "def render_glyph(self, output: Output, ox: float, oy: float, font: str, font_class: str, sym: str, fontsize: float, dpi: float) -> None:\n info = self._get_info(font, font_class, sym, fontsize, dpi)\n output.glyphs.append((ox, oy, info))", + "docstring": "At position (*ox*, *oy*), draw the glyph specified by the remaining parameters (see `_get_info` for their detailed description).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", + "ast_data": "FunctionDef name:render_glyph arg:self arg:output arg:ox arg:oy arg:font arg:font_class arg:sym arg:fontsize arg:dpi arguments arg arg arg arg arg arg arg arg arg Assign Call Call" + }, + { + "library": "pandas", + "name": "_truncate_vertically", + "source_code": "def _truncate_vertically(self) -> None:\n assert self.max_rows_fitted is not None\n row_num = self.max_rows_fitted // 2\n if row_num >= 1:\n _len = len(self.tr_frame)\n _slice = np.hstack([np.arange(row_num), np.arange(_len - row_num, _len)])\n self.tr_frame = self.tr_frame.iloc[_slice]\n else:\n row_num = cast(int, self.max_rows)\n self.tr_frame = self.tr_frame.iloc[:row_num, :]\n self.tr_row_num = row_num", + "docstring": "Remove rows, which are not to be displayed. 
Attributes affected: - tr_frame - tr_row_num", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\format.py", + "ast_data": "FunctionDef name:_truncate_vertically arg:self arguments arg Compare Assign If Compare Assign Call Assign Call Call Call Assign Assign Call Assign Assign" + }, + { + "library": "django", + "name": "copy_file", + "source_code": "def copy_file(self, path, prefixed_path, source_storage):\n if prefixed_path in self.copied_files:\n return self.log(\"Skipping '%s' (already copied earlier)\" % path)\n if not self.delete_file(path, prefixed_path, source_storage):\n return\n source_path = source_storage.path(path)\n if self.dry_run:\n self.log(\"Pretending to copy '%s'\" % source_path, level=1)\n else:\n self.log(\"Copying '%s'\" % source_path, level=2)\n with source_storage.open(path) as source_file:\n self.storage.save(prefixed_path, source_file)\n self.copied_files.append(prefixed_path)", + "docstring": "Attempt to copy `` with storage", + "type": "method", + "file_path": "django\\django\\contrib\\staticfiles\\management\\commands\\collectstatic.py", + "ast_data": "FunctionDef name:copy_file arg:self arg:path arg:prefixed_path arg:source_storage arguments arg arg arg arg If Compare Return return:yes Call If Call Return return:no Assign Call If Call Call With Call Call Call" + }, + { + "library": "tensorflow", + "name": "smart_cond", + "source_code": "def smart_cond(pred, true_fn=None, false_fn=None, name=None):\n if isinstance(pred, variables.Variable):\n return cond.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)\n return smart_module.smart_cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)", + "docstring": "Return either if predicate is true else . If is a bool or has a constant value, we return either or , otherwise we use to dynamically route to both. Args: pred: A scalar determining whether to return the result of or . true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using . Returns: Tensors returned by the call to either or . 
Raises: TypeError: If or is not callable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\control_flow_util.py", + "ast_data": "FunctionDef name:smart_cond arg:pred arg:true_fn arg:false_fn arg:name arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_connectionstyle", + "source_code": "def get_connectionstyle(self):\n return self._connector", + "docstring": "Return the used.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_connectionstyle arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "print_", + "source_code": "def print_(*objects, **kwargs):\n unknown_kwargs = tuple(set(kwargs.keys()) - set(('sep', 'end', 'file', 'flush')))\n if unknown_kwargs:\n raise ValueError('invalid keyword arguments: {}'.format(unknown_kwargs))\n print_fn = _py_print\n for x in objects:\n print_override = registry_lookup(print_registry, x)\n if print_override is not None:\n print_fn = print_override\n break\n if print_fn is _py_print:\n assert not any((tensor_util.is_tf_type(s) for s in objects))\n return print_fn(*objects, **kwargs)", + "docstring": "Overload of the print builtin.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py", + "ast_data": "FunctionDef name:print_ arguments arg arg Assign Call Call Call Call If Raise Call Call Assign For Assign Call If Compare Assign If Compare Call Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "is_msys_mingw", + "source_code": "def is_msys_mingw():\n return False", + "docstring": "Return true if this in an MinGW/MSYS build The user may prompted for confirmation so only call this function once.", + "type": "function", + "file_path": "pygame\\buildconfig\\config.py", + "ast_data": "FunctionDef name:is_msys_mingw arguments Return return:yes" + }, + { + "library": "tensorflow", + "name": "op", + "source_code": "@property\ndef op(self) -> ops.Operation:\n return self._parent_op", + "docstring": "The op for this variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "isna", + "source_code": "def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:\n raise AbstractMethodError(self)", + "docstring": "A 1-D array indicating if each value is missing. Returns ------- numpy.ndarray or pandas.api.extensions.ExtensionArray In most cases, this should return a NumPy ndarray. 
For exceptional cases like `ExtensionArray._reduceExtensionArray._accumulate` should be implemented Examples -------- >>> arr = pd.array([1, 2, np.nan, np.nan]) >>> arr.isna() array([False, False, True, True])", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:isna arg:self arguments arg Raise Call" + }, + { + "library": "scipy", + "name": "tsem", + "source_code": "@xp_capabilities()\n@_axis_nan_policy_factory(lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,))\ndef tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):\n xp = array_namespace(a)\n a, _ = _put_val_to_limits(a, limits, inclusive, xp=xp)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', SmallSampleWarning)\n sd = _xp_var(a, correction=ddof, axis=axis, nan_policy='omit', xp=xp) ** 0.5\n not_nan = xp.astype(~xp.isnan(a), a.dtype)\n n_obs = xp.sum(not_nan, axis=axis, dtype=sd.dtype)\n return sd / n_obs ** 0.5", + "docstring": "Compute the trimmed standard error of the mean. This function finds the standard error of the mean for given values, ignoring values outside the given . Parameters ---------- a : array_like Array of values. limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array . ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tsem : float Trimmed standard error of the mean. Notes ----- uses unbiased sample standard deviation, i.e. it uses a correction factor ``. Examples -------- >>> import numpy as np >>> from scipy import stats >>> x = np.arange(20) >>> stats.tsem(x) 1.3228756555322954 >>> stats.tsem(x, (3,17)) 1.1547005383792515", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:tsem arg:a arg:limits arg:inclusive arg:axis arg:ddof arguments arg arg arg arg arg Assign Call Assign Call With Call Call Assign Call Assign Call Call Assign Call Return return:yes Call Call arguments arg arguments arg arg" + }, + { + "library": "matplotlib", + "name": "remove_tool", + "source_code": "def remove_tool(self, name):\n tool = self.get_tool(name)\n if getattr(tool, 'toggled', False):\n self.trigger_tool(tool, 'toolmanager')\n self._remove_keys(name)\n event = ToolEvent('tool_removed_event', self, tool)\n self._callbacks.process(event.name, event)\n del self._tools[name]", + "docstring": "Remove tool named *name*. 
Parameters ---------- name : str Name of the tool.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", + "ast_data": "FunctionDef name:remove_tool arg:self arg:name arguments arg arg Assign Call If Call Call Call Assign Call Call" + }, + { + "library": "django", + "name": "exclude", + "source_code": "def exclude(self, *args, **kwargs):\n self._not_support_combined_queries('exclude')\n return self._filter_or_exclude(True, args, kwargs)", + "docstring": "Return a new QuerySet instance with NOT (args) ANDed to the existing set.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:exclude arg:self arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "LibraryInfo", + "source_code": "class LibraryInfo:\n\n def __init__(self, name, description, version, sections, vars, requires=None):\n self.name = name\n self.description = description\n if requires:\n self.requires = requires\n else:\n self.requires = []\n self.version = version\n self._sections = sections\n self.vars = vars\n\n def sections(self):\n return list(self._sections.keys())\n\n def cflags(self, section='default'):\n val = self.vars.interpolate(self._sections[section]['cflags'])\n return _escape_backslash(val)\n\n def libs(self, section='default'):\n val = self.vars.interpolate(self._sections[section]['libs'])\n return _escape_backslash(val)\n\n def __str__(self):\n m = ['Name: %s' % self.name, 'Description: %s' % self.description]\n if self.requires:\n m.append('Requires:')\n else:\n m.append('Requires: %s' % ','.join(self.requires))\n m.append('Version: %s' % self.version)\n return '\\n'.join(m)", + "docstring": "Object containing build information about a library. Parameters ---------- name : str The library name. description : str Description of the library. version : str Version string. sections : dict The sections of the configuration file for the library. The keys are the section headers, the values the text under each header. vars : class instance A instance, which contains `` pairs for variables defined in the configuration file for the library. requires : sequence, optional The required libraries for the library to be installed. 
Notes ----- All input parameters (except \"sections\" which is a method) are available as attributes of the same name.", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\npy_pkg_config.py", + "ast_data": "ClassDef name:LibraryInfo FunctionDef name:__init__ arg:self arg:name arg:description arg:version arg:sections arg:vars arg:requires arguments arg arg arg arg arg arg arg Assign Assign If Assign Assign Assign Assign Assign FunctionDef name:sections arg:self arguments arg Return return:yes Call Call FunctionDef name:cflags arg:self arg:section arguments arg arg Assign Call Return return:yes Call FunctionDef name:libs arg:self arg:section arguments arg arg Assign Call Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Assign If Call Call Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_gen_roots_and_weights", + "source_code": "def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):\n k = np.arange(n, dtype='d')\n c = np.zeros((2, n))\n c[0, 1:] = bn_func(k[1:])\n c[1, :] = an_func(k)\n x = linalg.eigvals_banded(c, overwrite_a_band=True)\n y = f(n, x)\n dy = df(n, x)\n x -= y / dy\n fm = f(n - 1, x)\n log_fm = np.log(np.abs(fm))\n log_dy = np.log(np.abs(dy))\n fm /= np.exp((log_fm.max() + log_fm.min()) / 2.0)\n dy /= np.exp((log_dy.max() + log_dy.min()) / 2.0)\n w = 1.0 / (fm * dy)\n if symmetrize:\n w = (w + w[::-1]) / 2\n x = (x - x[::-1]) / 2\n w *= mu0 / w.sum()\n if mu:\n return (x, w, mu0)\n else:\n return (x, w)", + "docstring": "[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu) Returns the roots (x) of an nth order orthogonal polynomial, and weights (w) to use in appropriate Gaussian quadrature with that orthogonal polynomial. The polynomials have the recurrence relation P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x) an_func(n) should return A_n sqrt_bn_func(n) should return sqrt(B_n) mu ( = h_0 ) is the integral of the weight over the orthogonal interval", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:_gen_roots_and_weights arg:n arg:mu0 arg:an_func arg:bn_func arg:f arg:df arg:symmetrize arg:mu arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Call Call Call Call Assign If Assign Assign Call If Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_add_summary_recording_cond_transformer", + "source_code": "def _add_summary_recording_cond_transformer(parent, node, full_name, name, logs, cond):\n node.args.append(pasta.parse(cond))\n logs.append((ast_edits.INFO, node.lineno, node.col_offset, 'Adding `%s` argument to %s in anticipation of it being renamed to tf.compat.v2.summary.record_if()' % (cond, full_name or name)))\n return node", + "docstring": "Adds cond argument to tf.contrib.summary.xxx_record_summaries(). 
This is in anticipation of them being renamed to tf.summary.record_if(), which requires the cond argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py", + "ast_data": "FunctionDef name:_add_summary_recording_cond_transformer arg:parent arg:node arg:full_name arg:name arg:logs arg:cond arguments arg arg arg arg arg arg Call Call Call BoolOp Return return:yes" + }, + { + "library": "tensorflow", + "name": "shape_n", + "source_code": "@tf_export('shape_n')\n@dispatch.add_dispatch_support\ndef shape_n(input, out_type=dtypes.int32, name=None):\n return gen_array_ops.shape_n(input, out_type=out_type, name=name)", + "docstring": "Returns shape of a list of tensors. Given a list of tensors, is much faster than applying to each tensor individually. >>> a = tf.ones([1, 2]) >>> b = tf.ones([2, 3]) >>> c = tf.ones([3, 4]) >>> tf.shape_n([a, b, c]) [, , ] Args: input: A list of at least 1 object with the same dtype. out_type: The specified output type of the operation ( or ). Defaults to (optional). name: A name for the operation (optional). Returns: A list of specifying the shape of each input tensor with type of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:shape_n arg:input arg:out_type arg:name arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "_password_validators_help_text_html", + "source_code": "def _password_validators_help_text_html(password_validators=None):\n help_texts = password_validators_help_texts(password_validators)\n help_items = format_html_join('', '
<li>{}</li>', ((help_text,) for help_text in help_texts))\n return format_html('<ul>{}</ul>', help_items) if help_items else ''", "docstring": "Return an HTML string with all help texts of all configured validators in an <ul>.", "type": "function", "file_path": "django\\django\\contrib\\auth\\password_validation.py", "ast_data": "FunctionDef name:_password_validators_help_text_html arg:password_validators arguments arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_runtime_arg_values", + "source_code": "@override\ndef get_runtime_arg_values(self, **kwargs: Any) -> list[Any]:\n return [kwargs[arg.name] for arg in self.get_runtime_arg_info()]", + "docstring": "Helper method to retrieve runtime args from generate kwargs", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\ck_template.py", + "ast_data": "FunctionDef name:get_runtime_arg_values arg:self arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_reset_cache", + "source_code": "def _reset_cache(self, key: str | None=None) -> None:\n if not hasattr(self, '_cache'):\n return\n if key is None:\n self._cache.clear()\n else:\n self._cache.pop(key, None)", + "docstring": "Reset cached properties. If ``key`` is passed, only clears that key.", + "type": "method", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "FunctionDef name:_reset_cache arg:self arg:key arguments arg arg If Call Return return:no If Compare Call Call" + }, + { + "library": "pytorch", + "name": "benchmark", + "source_code": "@time_and_count\ndef benchmark(self: Self, fn: Callable[..., Any], fn_args: tuple[Any, ...], fn_kwargs: dict[str, Any], **kwargs: Any) -> float:\n inferred_device = None\n for arg_or_kwarg in chain(fn_args, fn_kwargs.values()):\n if not isinstance(arg_or_kwarg, torch.Tensor):\n continue\n if inferred_device is None:\n inferred_device = arg_or_kwarg.device\n elif arg_or_kwarg.device != inferred_device:\n raise ValueError(\"Can't safely infer the device type of `fn` with multiple device types in `fn_args` and `fn_kwargs`!\")\n if inferred_device is None:\n raise ValueError(\"Can't safely infer the device type of `fn` with no device types in `fn_args` or `fn_kwargs`! You should be calling `.benchmark_cpu` or `.benchmark_gpu` directly.\")\n _callable = lambda: fn(*fn_args, **fn_kwargs)\n if inferred_device == torch.device('cpu'):\n return self.benchmark_cpu(_callable, **kwargs)\n return self.benchmark_gpu(_callable, **kwargs)", + "docstring": "Benchmark and return the runtime, in milliseconds (the actual runtime calculation is dictated by the benchmarking implementation, but may be one of [mean, median, minimum, etc.]). Functions as a convenience wrapper around device-specific implementations, like `benchmark_cpu` and `benchmark_gpu`. Raises `ValueError` if we can't safely infer the device type of `fn`; for example, if multiple device types are found in `fn_args` and `fn_kwargs`, or if no device types are found. Arguments: - fn: The function to benchmark. - fn_args: The function's arguments. - fn_kwargs: The function's kwargs. Keyword Arguments: - **kwargs: The benchmarking implementation's kwargs. 
Returns: - The runtime of , in milliseconds.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py", + "ast_data": "FunctionDef name:benchmark arg:self arg:fn arg:fn_args arg:fn_kwargs arguments arg arg arg arg arg Assign For Call Call If Call If Compare Assign If Compare Raise Call If Compare Raise Call Assign arguments Call If Compare Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "remove_sprites_of_layer", + "source_code": "def remove_sprites_of_layer(self, layer_nr):\n sprites = self.get_sprites_from_layer(layer_nr)\n self.remove(*sprites)\n return sprites", + "docstring": "remove all sprites from a layer and return them as a list LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:remove_sprites_of_layer arg:self arg:layer_nr arguments arg arg Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__get_tensor_shard__", + "source_code": "def __get_tensor_shard__(self, index: MetadataIndex) -> torch.Tensor:\n if index.index is not None:\n if len(self._local_shards) > index.index and self._storage_meta.chunks[index.index].offsets == index.offset:\n return self._local_shards[index.index]\n if index.offset is not None:\n for shard, chunk in zip(self._local_shards, self._storage_meta.chunks):\n if chunk.offsets == index.offset:\n return shard\n if len(self._local_shards) == 0 and self._storage_meta.chunks[0].sizes == torch.Size([0, 0]):\n return torch.empty(0)\n raise ValueError(f\"Could not find shard at '{index.offset}' for FQN: '{index.fqn}'\")", + "docstring": "For compatibility with DCP, we support finding shard based on index Return a 'torch.Tensor' shard based on 'MetadataIndex'.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py", + "ast_data": "FunctionDef name:__get_tensor_shard__ arg:self arg:index arguments arg arg If Compare If BoolOp Compare Call Compare Return return:yes If Compare For Call If Compare Return return:yes If BoolOp Compare Call Compare Call Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "_ensure_commit", + "source_code": "def _ensure_commit(git_sha1: str) -> None:\n cmd = git('cat-file', '-e', git_sha1 + '^{commit}')\n p = subprocess.run(cmd, capture_output=True, check=False)\n if p.returncode == 0:\n return\n cmd = git('fetch', GITHUB_REMOTE_URL, git_sha1)\n subprocess.check_call(cmd)", + "docstring": "Make sure that we actually have the commit locally", + "type": "function", + "file_path": "pytorch\\tools\\nightly.py", + "ast_data": "FunctionDef name:_ensure_commit arg:git_sha1 arguments arg Assign Call Assign Call If Compare Return return:no Assign Call Call" + }, + { + "library": "tensorflow", + "name": "GenerateCostReport", + "source_code": "def GenerateCostReport(metagraph, per_node_report=False, verbose=False, cluster=None):\n if cluster is None:\n cluster = gcluster.Cluster(disable_detailed_stats=False)\n return tf_wrap.GenerateCostReport(metagraph.SerializeToString(), per_node_report, verbose, cluster.tf_cluster)", + "docstring": "Analyze the cost of each TensorFlow op and node in the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. per_node_report: by default the report contains stats aggregated on a per op type basis, setting per_node_report to True adds results for each individual node to the report. 
verbose: Prints out the entire operation proto instead of a summary table. cluster: Analyze the costs using the specified cluster, or the local machine if no cluster was specified. Returns: A string of cost report.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\grappler\\cost_analyzer.py", + "ast_data": "FunctionDef name:GenerateCostReport arg:metagraph arg:per_node_report arg:verbose arg:cluster arguments arg arg arg arg If Compare Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "FieldError", + "source_code": "class FieldError(Exception):\n pass", + "docstring": "Some kind of problem with a model field.", + "type": "class", + "file_path": "django\\django\\core\\exceptions.py", + "ast_data": "ClassDef name:FieldError" + }, + { + "library": "seaborn", + "name": "var_levels", + "source_code": "@property\ndef var_levels(self):\n for var in self.variables:\n if (map_obj := getattr(self, f'_{var}_map', None)) is not None:\n self._var_levels[var] = map_obj.levels\n return self._var_levels", + "docstring": "Property interface to ordered list of variables levels. Each time it's accessed, it updates the var_levels dictionary with the list of levels in the current semantic mappers. But it also allows the dictionary to persist, so it can be used to set levels by a key. This is used to track the list of col/row levels using an attached FacetGrid object, but it's kind of messy and ideally fixed by improving the faceting logic so it interfaces better with the modern approach to tracking plot variables.", + "type": "method", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "FunctionDef name:var_levels arg:self arguments arg For If Compare Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "merge_call", + "source_code": "def merge_call(self, merge_fn, args=(), kwargs=None):\n require_replica_context(self)\n if kwargs is None:\n kwargs = {}\n merge_fn = autograph.tf_convert(merge_fn, autograph_ctx.control_status_ctx(), convert_by_default=False)\n return self._merge_call(merge_fn, args, kwargs)", + "docstring": "Merge args across replicas and run in a cross-replica context. This allows communication and coordination when there are multiple calls to the step_fn triggered by a call to . See for an explanation. If not inside a distributed scope, this is equivalent to: Args: merge_fn: Function that joins arguments from threads that are given as PerReplica. It accepts object as the first argument. args: List or tuple with positional per-thread arguments for . kwargs: Dict with keyword per-thread arguments for . 
Returns: The return value of , except for values which are unpacked.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:merge_call arg:self arg:merge_fn arg:args arg:kwargs arguments arg arg arg arg Call If Compare Assign Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "deprecated_internal_learning_phase_scope", + "source_code": "@tf_contextlib.contextmanager\ndef deprecated_internal_learning_phase_scope(value):\n global _GRAPH_LEARNING_PHASES\n if value not in {0, 1}:\n raise ValueError('Expected learning phase to be 0 or 1.')\n with ops.init_scope():\n if context.executing_eagerly():\n previous_eager_value = _GRAPH_LEARNING_PHASES.get(_DUMMY_EAGER_GRAPH.key, None)\n previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)\n learning_phase_previously_set = _DUMMY_EAGER_GRAPH.learning_phase_is_set\n try:\n deprecated_internal_set_learning_phase(value)\n yield\n finally:\n if not learning_phase_previously_set:\n _DUMMY_EAGER_GRAPH.learning_phase_is_set = False\n with ops.init_scope():\n if context.executing_eagerly():\n if previous_eager_value is not None:\n _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_eager_value\n elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES:\n del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]\n graph = get_graph()\n if previous_graph_value is not None:\n _GRAPH_LEARNING_PHASES[graph] = previous_graph_value\n elif graph in _GRAPH_LEARNING_PHASES:\n del _GRAPH_LEARNING_PHASES[graph]", + "docstring": "An internal-only version of . Unlike the public method, this method does not raise a deprecation warning. This is needed because saved model saving needs to set learning phase to maintain compatibility with code that sets/gets the learning phase, but saved model saving itself shouldn't raise a deprecation warning. We can get rid of this method and its usages when the public API is removed. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if is neither nor .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:deprecated_internal_learning_phase_scope arg:value arguments arg If Compare Raise Call With Call If Call Assign Call Assign Call Call Assign Try Call If Assign With Call If Call If Compare Assign If Compare Assign Call If Compare Assign If Compare" + }, + { + "library": "tensorflow", + "name": "HeNormal", + "source_code": "class HeNormal(VarianceScaling):\n\n def __init__(self, seed=None):\n super(HeNormal, self).__init__(scale=2.0, mode='fan_in', distribution='truncated_normal', seed=seed)\n\n def get_config(self):\n return {'seed': self.seed}", + "docstring": "He normal initializer. Also available via the shortcut function . It draws samples from a truncated normal distribution centered on 0 with where is the number of input units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.HeNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.HeNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. 
References: [He et al., 2015]( # pylint: disable=line-too-long ([pdf](", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "ClassDef name:HeNormal FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_validate_params", + "source_code": "def _validate_params(self):\n validate_parameter_constraints(self._parameter_constraints, self.get_params(deep=False), caller_name=self.__class__.__name__)", + "docstring": "Validate types and values of constructor parameters The expected type and values must be defined in the class attribute, which is a dictionary . See the docstring of for a description of the accepted constraints.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "FunctionDef name:_validate_params arg:self arguments arg Call Call" + }, + { + "library": "pytorch", + "name": "register_lowering", + "source_code": "def register_lowering(aten_fn, broadcast=False, type_promotion_kind: Optional[ELEMENTWISE_TYPE_PROMOTION_KIND]=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, convert_input_to_bool=False) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:\n return functools.partial(_register_lowering, aten_fn, broadcast=broadcast, type_promotion_kind=type_promotion_kind, convert_input_to_bool=convert_input_to_bool)", + "docstring": "Shim to support decorator syntax.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\lowering.py", + "ast_data": "FunctionDef name:register_lowering arg:aten_fn arg:broadcast arg:type_promotion_kind arg:convert_input_to_bool arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "multiple_chunks", + "source_code": "def multiple_chunks(self, chunk_size=None):\n return self.size > (chunk_size or self.DEFAULT_CHUNK_SIZE)", + "docstring": "Return `` -- there's no good reason to read from memory in chunks.", + "type": "method", + "file_path": "django\\django\\core\\files\\base.py", + "ast_data": "FunctionDef name:multiple_chunks arg:self arg:chunk_size arguments arg arg Return return:yes Compare BoolOp" + }, + { + "library": "django", + "name": "_post_init", + "source_code": "def _post_init(self):\n self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz) if self.has_cs else None", + "docstring": "Perform post-initialization setup.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:_post_init arg:self arguments arg Assign Call Call" + }, + { + "library": "scipy", + "name": "integrate_gaussian", + "source_code": "def integrate_gaussian(self, mean, cov):\n mean = atleast_1d(squeeze(mean))\n cov = atleast_2d(cov)\n if mean.shape != (self.d,):\n raise ValueError(f'mean does not have dimension {self.d}')\n if cov.shape != (self.d, self.d):\n raise ValueError(f'covariance does not have dimension {self.d}')\n mean = mean[:, newaxis]\n sum_cov = self.covariance + cov\n sum_cov_chol = linalg.cho_factor(sum_cov)\n diff = self.dataset - mean\n tdiff = linalg.cho_solve(sum_cov_chol, diff)\n sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))\n norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det\n energies = np_vecdot(diff, tdiff, axis=0) / 2.0\n result = np_vecdot(exp(-energies), self.weights, axis=0) / norm_const\n return result", + "docstring": "Multiply estimated density by a multivariate Gaussian and integrate over the 
whole space. Parameters ---------- mean : aray_like A 1-D array, specifying the mean of the Gaussian. cov : array_like A 2-D array, specifying the covariance matrix of the Gaussian. Returns ------- result : scalar The value of the integral. Raises ------ ValueError If the mean or covariance of the input Gaussian differs from the KDE's dimensionality.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_kde.py", + "ast_data": "FunctionDef name:integrate_gaussian arg:self arg:mean arg:cov arguments arg arg arg Assign Call Call Assign Call If Compare Raise Call If Compare Raise Call Assign Assign Assign Call Assign Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_connector_segment_str_at_line", + "source_code": "def _connector_segment_str_at_line(self, line: int) -> str:\n if self.upper_printer is None and self.lower_printer is None:\n return ''\n upper_total_rows = self.upper_printer._total_rows() if self.upper_printer else 1\n lower_total_rows = self.lower_printer._total_rows() if self.lower_printer else 1\n if line == 0:\n return ' __'\n elif line < upper_total_rows + 1:\n return ' | '\n elif line == upper_total_rows + 1:\n return ' |__'\n elif line < upper_total_rows + lower_total_rows + 1:\n return ' '\n return ''", + "docstring": "Get the connector segment string at the given line.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:_connector_segment_str_at_line arg:self arg:line arguments arg arg If BoolOp Compare Compare Return return:yes Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "sphinx", + "name": "global_toctree_for_doc", + "source_code": "def global_toctree_for_doc(env: BuildEnvironment, docname: str, builder: Builder, collapse: bool=False, includehidden: bool=True, maxdepth: int=0, titles_only: bool=False) -> Element | None:\n resolved = (_resolve_toctree(env, docname, builder, toctree_node, prune=True, maxdepth=int(maxdepth), titles_only=titles_only, collapse=collapse, includehidden=includehidden, tags=builder.tags) for toctree_node in env.master_doctree.findall(addnodes.toctree))\n toctrees = [toctree for toctree in resolved if toctree is not None]\n if not toctrees:\n return None\n result = toctrees[0]\n for toctree in toctrees[1:]:\n result.extend(toctree.children)\n return result", + "docstring": "Get the global ToC tree at a given document. This gives the global ToC, with all ancestors and their siblings.", + "type": "function", + "file_path": "sphinx\\sphinx\\environment\\adapters\\toctree.py", + "ast_data": "FunctionDef name:global_toctree_for_doc arg:env arg:docname arg:builder arg:collapse arg:includehidden arg:maxdepth arg:titles_only arguments arg arg arg arg arg arg arg Assign Call Call Call Assign Compare If Return return:no Assign For Call Return return:yes" + }, + { + "library": "pytorch", + "name": "ShardingStrategy", + "source_code": "class ShardingStrategy(Enum):\n FULL_SHARD = auto()\n SHARD_GRAD_OP = auto()\n NO_SHARD = auto()\n HYBRID_SHARD = auto()\n _HYBRID_SHARD_ZERO2 = auto()", + "docstring": "This specifies the sharding strategy to be used for distributed training by :class:. 
- `DistributedDataParallel`, except this may provide even higher throughput since the unsharded parameters are not freed after the forward pass, saving the all-gathers in the pre-backward.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py", + "ast_data": "ClassDef name:ShardingStrategy Assign Call Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "assert_non_singular", + "source_code": "def assert_non_singular(self, name='assert_non_singular'):\n with self._name_scope(name):\n return self._assert_non_singular()", + "docstring": "Returns an that asserts this operator is non singular. This operator is considered non-singular if Args: name: A string name to prepend to created ops. Returns: An , that, when run, will raise an if the operator is singular.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:assert_non_singular arg:self arg:name arguments arg arg With Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_tma_workspace_arg", + "source_code": "def get_tma_workspace_arg(num_tma_descriptors: int, device: torch.device, num_programs: Optional[int]=None) -> WorkspaceArg:\n from .codegen.common import WorkspaceArg, WorkspaceZeroMode\n if num_programs is None:\n num_programs = get_num_sms()\n zero_mode = WorkspaceZeroMode.from_bool(False)\n size = num_programs * num_tma_descriptors * TMA_DESCRIPTOR_SIZE\n return WorkspaceArg(count=size, zero_mode=zero_mode, device=device, outer_name=WorkspaceArg.unique_name())", + "docstring": "Builds and returns a WorkspaceArg for the device side TMA workspace buffer.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "FunctionDef name:get_tma_workspace_arg arg:num_tma_descriptors arg:device arg:num_programs arguments arg arg arg If Compare Assign Call Assign Call Assign Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "cc_normalize_flags", + "source_code": "def cc_normalize_flags(self, flags):\n assert isinstance(flags, list)\n if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc:\n return self._cc_normalize_unix(flags)\n if self.cc_is_msvc or self.cc_is_iccw:\n return self._cc_normalize_win(flags)\n return flags", + "docstring": "Remove the conflicts that caused due gathering implied features flags. Parameters ---------- 'flags' list, compiler flags flags should be sorted from the lowest to the highest interest. Returns ------- list, filtered from any conflicts. 
Examples -------- >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) ['armv8.2-a+fp16+dotprod'] >>> self.cc_normalize_flags( ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] ) ['-march=core-avx2']", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", + "ast_data": "FunctionDef name:cc_normalize_flags arg:self arg:flags arguments arg arg Call If BoolOp Return return:yes Call If BoolOp Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "expect_partial", + "source_code": "def expect_partial(self):\n return self", + "docstring": "Silence warnings about incomplete checkpoint restores.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:expect_partial arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "count_nonzero_v2", + "source_code": "@tf_export('math.count_nonzero', v1=[])\n@dispatch.add_dispatch_support\ndef count_nonzero_v2(input, axis=None, keepdims=None, dtype=dtypes.int64, name=None):\n if keepdims is None:\n keepdims = False\n with ops.name_scope(name, 'count_nonzero', [input]):\n input = ops.convert_to_tensor(input, name='input')\n if input.dtype == dtypes.bool:\n predicate = input\n else:\n zero = array_ops.zeros([], dtype=input.dtype)\n predicate = gen_math_ops.not_equal(input, zero)\n return cast(reduce_sum(cast(predicate, dtypes.int64), axis=axis, keepdims=keepdims), dtype=dtype)", + "docstring": "Computes number of nonzero elements across dimensions of a tensor. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each entry in . If is true, the reduced dimensions are retained with length 1. If has no entries, all dimensions are reduced, and a tensor with a single element is returned. **NOTE** Floating point comparison to zero is done by exact floating point equality check. Small values are **not** rounded to zero for purposes of the nonzero check. For example: **NOTE** Strings are compared against zero-length empty string . Any string with a size greater than zero is already considered as nonzero. For example: Args: input: The tensor to reduce. Should be of numeric type, , or . axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. dtype: The output dtype; defaults to . name: A name for the operation (optional). 
Returns: The reduced tensor (number of nonzero values).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:count_nonzero_v2 arg:input arg:axis arg:keepdims arg:dtype arg:name arguments arg arg arg arg arg If Compare Assign With Call Assign Call If Compare Assign Assign Call Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "sphinx", + "name": "get_original_image_uri", + "source_code": "def get_original_image_uri(self, name: str) -> str:\n while _StrPath(name) in self.env.original_image_uri:\n name = self.env.original_image_uri[_StrPath(name)]\n return name", + "docstring": "Get the original image URI.", + "type": "method", + "file_path": "sphinx\\sphinx\\environment\\adapters\\asset.py", + "ast_data": "FunctionDef name:get_original_image_uri arg:self arg:name arguments arg arg While Compare Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "imshow_rgb", + "source_code": "def imshow_rgb(self, r, g, b, **kwargs):\n if not r.shape == g.shape == b.shape:\n raise ValueError(f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')\n RGB = np.dstack([r, g, b])\n R = np.zeros_like(RGB)\n R[:, :, 0] = r\n G = np.zeros_like(RGB)\n G[:, :, 1] = g\n B = np.zeros_like(RGB)\n B[:, :, 2] = b\n im_rgb = self.RGB.imshow(RGB, **kwargs)\n im_r = self.R.imshow(R, **kwargs)\n im_g = self.G.imshow(G, **kwargs)\n im_b = self.B.imshow(B, **kwargs)\n return (im_rgb, im_r, im_g, im_b)", + "docstring": "Create the four images {rgb, r, g, b}. Parameters ---------- r, g, b : array-like The red, green, and blue arrays. **kwargs Forwarded to calls for the four images. Returns ------- rgb : r : g : b :", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_rgb.py", + "ast_data": "FunctionDef name:imshow_rgb arg:self arg:r arg:g arg:b arguments arg arg arg arg arg If Compare Raise Call Assign Call Assign Call Assign Assign Call Assign Assign Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_slots", + "source_code": "def _create_slots(self, var_list):\n pass", + "docstring": "Create all slots needed by the variables. Args: var_list: A list of objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_create_slots arg:self arg:var_list arguments arg arg" + }, + { + "library": "tensorflow", + "name": "split", + "source_code": "@tf_export('split')\n@dispatch.add_dispatch_support\ndef split(value, num_or_size_splits, axis=0, num=None, name='split'):\n if isinstance(num_or_size_splits, (numbers.Integral, tensor_shape.Dimension)):\n return gen_array_ops.split(axis=axis, num_split=num_or_size_splits, value=value, name=name)\n size_splits = ops.convert_to_tensor(num_or_size_splits)\n if size_splits._rank() == 0:\n raise ValueError('Rank-0 tensors are not supported as the num_or_size_splits argument to split. Argument provided: %s' % (num_or_size_splits,))\n if num is None:\n size_splits_shape = size_splits._shape_tuple()\n if size_splits_shape:\n num = size_splits_shape[0]\n if num is None:\n raise ValueError(f'Cannot infer argument `num` from shape {num_or_size_splits}')\n return gen_array_ops.split_v(value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)", + "docstring": "Splits a tensor into a list of sub tensors. See also . 
If is an , then it splits along the dimension into smaller tensors. This requires that is divisible by . If is a 1-D Tensor (or list), then is split into elements. The shape of the -th element has the same size as the except along dimension where the size is . For example: >>> x = tf.Variable(tf.random.uniform([5, 30], -1, 1)) >>> >>> # Split into 3 tensors along dimension 1 >>> s0, s1, s2 = tf.split(x, num_or_size_splits=3, axis=1) >>> tf.shape(s0).numpy() array([ 5, 10], dtype=int32) >>> >>> # Split into 3 tensors with sizes [4, 15, 11] along dimension 1 >>> split0, split1, split2 = tf.split(x, [4, 15, 11], 1) >>> tf.shape(split0).numpy() array([5, 4], dtype=int32) >>> tf.shape(split1).numpy() array([ 5, 15], dtype=int32) >>> tf.shape(split2).numpy() array([ 5, 11], dtype=int32) Args: value: The to split. num_or_size_splits: Either an indicating the number of splits along or a 1-D integer or Python list containing the sizes of each output tensor along . If an , then it must evenly divide ; otherwise the sum of sizes along the split axis must match that of the . axis: An or scalar . The dimension along which to split. Must be in the range . Defaults to 0. num: Optional, an , used to specify the number of outputs when it cannot be inferred from the shape of . name: A name for the operation (optional). Returns: if is an returns a list of objects; if is a 1-D list or 1-D returns objects resulting from splitting . Raises: ValueError: If is unspecified and cannot be inferred. ValueError: If is a scalar .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:split arg:value arg:num_or_size_splits arg:axis arg:num arg:name arguments arg arg arg arg arg If Call Return return:yes Call Assign Call If Compare Call Raise Call If Compare Assign Call If Assign If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "cast_to_model_input_dtypes", + "source_code": "def cast_to_model_input_dtypes(x, model):\n input_dtypes = nest.map_structure(lambda t: t.dtype, model.inputs)\n return nest.map_structure(math_ops.cast, x, input_dtypes)", + "docstring": "Casts the given data tensors to the dtypes of the model inputs. Args: x: tensor or list/tuple of tensors. model: The model. Returns: Converted input. 
Each tensor is casted to the corresponding input in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:cast_to_model_input_dtypes arg:x arg:model arguments arg arg Assign Call arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "getmro", + "source_code": "def getmro(cls):\n return _inspect.getmro(cls)", + "docstring": "TFDecorator-aware replacement for inspect.getmro.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:getmro arg:cls arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "_fromnxfunction_seq", + "source_code": "class _fromnxfunction_seq(_fromnxfunction):\n\n def __call__(self, x, *args, **params):\n func = getattr(np, self.__name__)\n _d = func(tuple((np.asarray(a) for a in x)), *args, **params)\n _m = func(tuple((getmaskarray(a) for a in x)), *args, **params)\n return masked_array(_d, mask=_m)", + "docstring": "A version of that is called with a single sequence of arrays followed by auxiliary args that are passed verbatim for both the data and mask calls.", + "type": "class", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "ClassDef name:_fromnxfunction_seq FunctionDef name:__call__ arg:self arg:x arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_is_output_dtype_supported_by_backend", + "source_code": "def _is_output_dtype_supported_by_backend(node: Node, qconfig: QConfigAny, dtype_config: DTypeConfig) -> bool:\n backend_config_output_dtype = dtype_config.output_dtype\n qconfig_output_dtype = None\n output_act_obs_or_fq_ctr = node.meta['target_dtype_info'].get('output_act_obs_or_fq_ctr', _DEFAULT_FP32_OBS_OR_FQ_CTR)\n output_act_obs_or_fq = output_act_obs_or_fq_ctr() if output_act_obs_or_fq_ctr else None\n qconfig_output_dtype, qconfig_output_is_dynamic = _get_dtype_and_is_dynamic(output_act_obs_or_fq)\n if qconfig_output_is_dynamic:\n qconfig_output_dtype = torch.float32\n dtype_matches = qconfig_output_dtype == backend_config_output_dtype\n qconfig_satisfies_constraints = _qconfig_satisfies_dtype_config_constraints(qconfig, dtype_config.output_dtype_with_constraints)\n return backend_config_output_dtype is None or (dtype_matches and qconfig_satisfies_constraints)", + "docstring": "Check if the configured qconfig for the output is supported by the backend or not", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py", + "ast_data": "FunctionDef name:_is_output_dtype_supported_by_backend arg:node arg:qconfig arg:dtype_config arguments arg arg arg Assign Assign Assign Call Assign Call Assign Call If Assign Assign Compare Assign Call Return return:yes BoolOp Compare BoolOp" + }, + { + "library": "matplotlib", + "name": "get_hatch_path", + "source_code": "def get_hatch_path(self, density=6.0):\n hatch = self.get_hatch()\n if hatch is None:\n return None\n return Path.hatch(hatch, density)", + "docstring": "Return a for the current hatch.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:get_hatch_path arg:self arg:density arguments arg arg Assign Call If Compare Return return:no Return return:yes Call" + }, + { + "library": "kornia", + "name": "cam2pixel", + "source_code": "def cam2pixel(cam_coords_src: Tensor, dst_proj_src: Tensor, eps: 
float=1e-12) -> Tensor:\n if not len(cam_coords_src.shape) == 4 and cam_coords_src.shape[3] == 3:\n raise ValueError(f'Input cam_coords_src has to be in the shape of BxHxWx3. Got {cam_coords_src.shape}')\n if not len(dst_proj_src.shape) == 3 and dst_proj_src.shape[-2:] == (4, 4):\n raise ValueError(f'Input dst_proj_src has to be in the shape of Bx4x4. Got {dst_proj_src.shape}')\n point_coords: Tensor = transform_points(dst_proj_src[:, None], cam_coords_src)\n x_coord: Tensor = point_coords[..., 0]\n y_coord: Tensor = point_coords[..., 1]\n z_coord: Tensor = point_coords[..., 2]\n u_coord: Tensor = x_coord / (z_coord + eps)\n v_coord: Tensor = y_coord / (z_coord + eps)\n pixel_coords_dst: Tensor = stack([u_coord, v_coord], dim=-1)\n return pixel_coords_dst", + "docstring": "Transform coordinates in the camera frame to the pixel frame. Args: cam_coords_src: (x, y, z) coordinates defined in the first camera coordinates system. Shape must be BxHxWx3. dst_proj_src: the projection matrix between the reference and the non reference camera frame. Shape must be Bx4x4. eps: small value to avoid division by zero error. Returns: tensor of shape BxHxWx2 with (u, v) pixel coordinates.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:cam2pixel arg:cam_coords_src arg:dst_proj_src arg:eps arguments arg arg arg If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Raise Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "maybe_reorder_for_minimizing_partition", + "source_code": "def maybe_reorder_for_minimizing_partition(self, nodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:\n from .memory import estimate_peak_memory, prepare_planning_info\n graph_outputs = OrderedSet(V.graph.get_output_names())\n default_peak_memory, name_to_freeable_input_buf = prepare_planning_info(nodes, self.name_to_buf, self.name_to_fused_node, OrderedSet(V.graph.graph_inputs.keys()), graph_outputs)\n reordered_nodes = self.reorder_for_minimizing_partition(nodes)\n reorder_peak_memory, _ = estimate_peak_memory(reordered_nodes, name_to_freeable_input_buf, graph_outputs)\n if reorder_peak_memory < default_peak_memory * 1.1:\n return reordered_nodes\n return nodes", + "docstring": "Reorder nodes to minimize the number of partitions if this only slightly increase peak memory.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:maybe_reorder_for_minimizing_partition arg:self arg:nodes arguments arg arg Assign Call Call Assign Call Call Call Assign Call Assign Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_to_tensor", + "source_code": "def _to_tensor(dataset_id) -> tensor.Tensor:\n if isinstance(dataset_id, tensor.Tensor):\n return dataset_id\n if isinstance(dataset_id, str) or isinstance(dataset_id, bytes):\n return ops.convert_to_tensor(dataset_id, dtype=dtypes.string, name='dataset_id')\n return ops.convert_to_tensor(dataset_id, dtype=dtypes.int64, name='dataset_id')", + "docstring": "Converts to Tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py", + "ast_data": "FunctionDef name:_to_tensor arg:dataset_id arguments arg If Call Return return:yes If BoolOp Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "maybe_disable_inference_mode_for_fake_prop", + "source_code": 
"@contextmanager\ndef maybe_disable_inference_mode_for_fake_prop() -> Generator[None, None, None]:\n if config.fake_tensor_disable_inference_mode:\n with torch._subclasses.meta_utils.disable_inference_mode_for_fake_prop():\n yield\n else:\n yield", + "docstring": "Turns off tracking of inference_mode for fake tensor propagation. With this context manager, when a real tensor is converted to fake tensor, the fake tensor looses its inference-ness.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:maybe_disable_inference_mode_for_fake_prop arguments If With Call" + }, + { + "library": "matplotlib", + "name": "clip_to_bbox", + "source_code": "def clip_to_bbox(self, bbox, inside=True):\n verts = _path.clip_path_to_rect(self, bbox, inside)\n paths = [Path(poly) for poly in verts]\n return self.make_compound_path(*paths)", + "docstring": "Clip the path to the given bounding box. The path must be made up of one or more closed polygons. This algorithm will not behave correctly for unclosed paths. If *inside* is , clip to the inside of the box, otherwise to the outside of the box.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\path.py", + "ast_data": "FunctionDef name:clip_to_bbox arg:self arg:bbox arg:inside arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "disable_resource_variables", + "source_code": "@deprecation.deprecated(None, 'non-resource variables are not supported in the long term')\n@tf_export(v1=['disable_resource_variables'])\ndef disable_resource_variables() -> None:\n global _DEFAULT_USE_RESOURCE\n _DEFAULT_USE_RESOURCE = False\n logging.vlog(1, 'Disabling resource variables')\n _api_usage_gauge.get_cell().set(False)", + "docstring": "Opts out of resource variables. If your code needs tf.disable_resource_variables() to be called to work properly please file a bug.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variables_toggle.py", + "ast_data": "FunctionDef name:disable_resource_variables arguments Assign Call Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "_proj_transform_clip", + "source_code": "def _proj_transform_clip(xs, ys, zs, M, focal_length):\n vec = _vec_pad_ones(xs, ys, zs)\n return _proj_transform_vec_clip(vec, M, focal_length)", + "docstring": "Transform the points by the projection matrix and return the clipping result returns txs, tys, tzs, tis", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\proj3d.py", + "ast_data": "FunctionDef name:_proj_transform_clip arg:xs arg:ys arg:zs arg:M arg:focal_length arguments arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "set_output", + "source_code": "def set_output(self, *, transform=None):\n super().set_output(transform=transform)\n for _, step, _ in self._iter():\n _safe_set_output(step, transform=transform)\n return self", + "docstring": "Set the output container when and are called. will set the output of all estimators in . Parameters ---------- transform : {\"default\", \"pandas\", \"polars\"}, default=None Configure output of and . 
- : Default output format of a transformer - : DataFrame output - : Polars output - : Transform configuration is unchanged Returns ------- self : estimator instance Estimator instance.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:set_output arg:self arguments arg arg Call Call For Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "get_object", + "source_code": "@classmethod\ndef get_object(cls, obj, transposed: bool):\n if transposed:\n obj = obj.T\n return obj", + "docstring": "these are written transposed", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:get_object arg:cls arg:obj arg:transposed arguments arg arg arg If Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_check_subgraph_closed", + "source_code": "def _check_subgraph_closed(n, reachable_by_input, input_nodes_set, name_to_input_name):\n next_to_visit = [n]\n visited = set()\n while next_to_visit:\n current_node = next_to_visit.pop()\n visited.add(current_node)\n if current_node in reachable_by_input and current_node not in input_nodes_set:\n raise TypeError('Node %s uses input %s not in input_nodes.' % (n, current_node))\n if current_node not in input_nodes_set:\n next_to_visit += [input_node for input_node in name_to_input_name[current_node] if input_node not in visited]", + "docstring": "Checks to make sure node only connects to predecessor graph through inputs. Args: n: Node to check reachable_by_input: Nodes that are reachable by all inputs of subgraph input_nodes_set: The set of nodes that are \"inputs\". name_to_input_name: Maps from name to the list of inputs. Raises: TypeError: If the given node uses items past inputs directly.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py", + "ast_data": "FunctionDef name:_check_subgraph_closed arg:n arg:reachable_by_input arg:input_nodes_set arg:name_to_input_name arguments arg arg arg arg Assign Assign Call While Assign Call Call If BoolOp Compare Compare Raise Call If Compare Compare" + }, + { + "library": "kornia", + "name": "Jr", + "source_code": "@staticmethod\ndef Jr(vec: Tensor) -> Tensor:\n return So3.right_jacobian(vec)", + "docstring": "Alias for right jacobian. Args: vec: the input point of shape :math:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", + "ast_data": "FunctionDef name:Jr arg:vec arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, graph=None, op_log=None):\n if not graph and (not context.executing_eagerly()):\n graph = ops.get_default_graph()\n self._coverage = 0.0\n self._graph = graph\n op_log = tfprof_logger.merge_default_with_oplog(self._graph, op_log=op_log)\n print_mdl.NewProfiler(_graph_string(self._graph), op_log.SerializeToString())", + "docstring": "Constructor. Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. op_log: optional. tensorflow::tfprof::OpLogProto proto. 
Used to define extra op types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:graph arg:op_log arguments arg arg arg If BoolOp Call Assign Call Assign Assign Assign Call Call Call Call" + }, + { + "library": "cherrypy", + "name": "sysfiles", + "source_code": "def sysfiles(self):\n search_mod_names = filter(re.compile(self.match).match, list(sys.modules.keys()))\n mods = map(sys.modules.get, search_mod_names)\n return set(filter(None, map(self._file_for_module, mods)))", + "docstring": "Return a Set of sys.modules filenames to monitor.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\plugins.py", + "ast_data": "FunctionDef name:sysfiles arg:self arguments arg Assign Call Call Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "gradient_proba", + "source_code": "def gradient_proba(self, y_true, raw_prediction, sample_weight=None, gradient_out=None, proba_out=None, n_threads=1):\n if gradient_out is None:\n if proba_out is None:\n gradient_out = np.empty_like(raw_prediction)\n proba_out = np.empty_like(raw_prediction)\n else:\n gradient_out = np.empty_like(proba_out)\n elif proba_out is None:\n proba_out = np.empty_like(gradient_out)\n self.closs.gradient_proba(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight, gradient_out=gradient_out, proba_out=proba_out, n_threads=n_threads)\n return (gradient_out, proba_out)", + "docstring": "Compute gradient and class probabilities fow raw_prediction. Parameters ---------- y_true : C-contiguous array of shape (n_samples,) Observed, true target values. raw_prediction : array of shape (n_samples, n_classes) Raw prediction values (in link space). sample_weight : None or C-contiguous array of shape (n_samples,) Sample weights. gradient_out : None or array of shape (n_samples, n_classes) A location into which the gradient is stored. If None, a new array might be created. proba_out : None or array of shape (n_samples, n_classes) A location into which the class probabilities are stored. If None, a new array might be created. n_threads : int, default=1 Might use openmp thread parallelism. Returns ------- gradient : array of shape (n_samples, n_classes) Element-wise gradients. 
proba : array of shape (n_samples, n_classes) Element-wise class probabilities.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "FunctionDef name:gradient_proba arg:self arg:y_true arg:raw_prediction arg:sample_weight arg:gradient_out arg:proba_out arg:n_threads arguments arg arg arg arg arg arg arg If Compare If Compare Assign Call Assign Call Assign Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "depthwise_conv2d_native_backprop_filter", + "source_code": "@tf_export('nn.depthwise_conv2d_backprop_filter', v1=['nn.depthwise_conv2d_native_backprop_filter', 'nn.depthwise_conv2d_backprop_filter'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('nn.depthwise_conv2d_native_backprop_filter')\ndef depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop, strides, padding, data_format='NHWC', dilations=[1, 1, 1, 1], name=None):\n padding, explicit_paddings = convert_padding(padding)\n return gen_nn_ops.depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop, strides, padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name)", + "docstring": "Computes the gradients of depthwise convolution with respect to the filter. Args: input: A . Must be one of the following types: , , , . 4-D with shape based on . For example, if is 'NHWC' then is a 4-D tensor. filter_sizes: A of type . An integer vector representing the tensor shape of , where is a 4-D tensor. out_backprop: A . Must have the same type as . 4-D with shape based on . For example, if is 'NHWC' then out_backprop shape is . Gradients w.r.t. the output of the convolution. strides: A list of . The stride of the sliding window for each dimension of the input of the convolution. padding: Controls how to pad the image before applying the convolution. Can be the string or indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. See [here]( for more information. When explicit padding is used and data_format is , this should be in the form . When explicit padding used and data_format is , this should be in the form . data_format: An optional from: . Defaults to . Specify the data format of the input and output data. With the default format \"NHWC\", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be \"NCHW\", the data storage order of: [batch, channels, height, width]. dilations: An optional list of . Defaults to . 1-D tensor of length 4. The dilation factor for each dimension of . If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of , see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A . 
Has the same type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:depthwise_conv2d_native_backprop_filter arg:input arg:filter_sizes arg:out_backprop arg:strides arg:padding arg:data_format arg:dilations arg:name arguments arg arg arg arg arg arg arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_retrieve_variables_impl", + "source_code": "@def_function.function\ndef _retrieve_variables_impl(config: Text, hosts: List[Tuple[int, Text]], variables: Dict[Text, Dict[Text, tf_variables.Variable]], table_config: tpu_embedding_v2_utils.TableConfig):\n for host_id, host in enumerate(hosts):\n with ops.device(host):\n for table in table_config:\n retrieved = table.optimizer._retrieve()(table_name=table.name, num_shards=len(hosts), shard_id=host_id, config=config)\n if not isinstance(retrieved, tuple):\n retrieved = (retrieved,)\n for i, slot in enumerate(['parameters'] + table.optimizer._slot_names()):\n sharded_var = variables[table.name][slot]\n if host_id < len(sharded_var.variables):\n sharded_var.variables[host_id].assign(retrieved[i])\n config = None", + "docstring": "Retrieve embedding tables from TPU to host memory. Args: config: A serialized TPUEmbeddingConfiguration proto. hosts: A list of all the host CPU devices. variables: A dictionary of dictionaries of TPUEmbeddingVariables. First key is the table name, second key is 'parameters' or the optimizer slot name. table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py", + "ast_data": "FunctionDef name:_retrieve_variables_impl arg:config arg:hosts arg:variables arg:table_config arguments arg arg arg arg For Call With Call For Assign Call Call Call If Call Assign For Call Call Assign If Compare Call Call Assign" + }, + { + "library": "kornia", + "name": "__len__", + "source_code": "def __len__(self) -> int:\n return self.colors.shape[-1]", + "docstring": "Return the number of colors in the colormap. Returns: Number of colors in the colormap.", + "type": "method", + "file_path": "kornia\\kornia\\color\\colormap.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_dataset", + "source_code": "@abc.abstractmethod\ndef get_dataset(self):\n raise NotImplementedError", + "docstring": "Get a dataset instance for the current DataAdapter. Note that the dataset returned does not repeat for epoch, so caller might need to create new iterator for the same dataset at the beginning of the epoch. This behavior might change in future. Returns: An tf.dataset.Dataset. 
Caller might use the dataset in different context, eg iter(dataset) in eager to get the value directly, or in graph mode, provide the iterator tensor to Keras model function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:get_dataset arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "avg_pool2d", + "source_code": "@tf_export('nn.avg_pool2d', v1=[])\n@dispatch.add_dispatch_support\ndef avg_pool2d(input, ksize, strides, padding, data_format='NHWC', name=None):\n with ops.name_scope(name, 'AvgPool2D', [input]) as name:\n if data_format is None:\n data_format = 'NHWC'\n channel_index = 1 if data_format.startswith('NC') else 3\n ksize = _get_sequence(ksize, 2, channel_index, 'ksize')\n strides = _get_sequence(strides, 2, channel_index, 'strides')\n return gen_nn_ops.avg_pool(input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)", + "docstring": "Performs the average pooling on the input. Each entry in is the mean of the corresponding size window in . Args: input: A 4-D of shape and type , , , , or . ksize: An int or list of that has length , or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length , or . The stride of the sliding window for each dimension of the input tensor. padding: A string, either or . The padding algorithm. See [here]( for more information. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. Returns: A with the same type as . The average pooled output tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:avg_pool2d arg:input arg:ksize arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg With Call If Compare Assign Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "vit_giant2", + "source_code": "def vit_giant2(patch_size=16, **kwargs):\n model = DinoVisionTransformer(patch_size=patch_size, embed_dim=1536, depth=40, num_heads=24, mlp_ratio=4, block_fn=partial(Block, attn_class=MemEffAttention), **kwargs)\n return model", + "docstring": "Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\dinov2.py", + "ast_data": "FunctionDef name:vit_giant2 arg:patch_size arguments arg arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "inverse_stft_window_fn", + "source_code": "@tf_export('signal.inverse_stft_window_fn')\n@dispatch.add_dispatch_support\ndef inverse_stft_window_fn(frame_step, forward_window_fn=window_ops.hann_window, name=None):\n\n def inverse_stft_window_fn_inner(frame_length, dtype):\n with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):\n frame_step_ = ops.convert_to_tensor(frame_step, name='frame_step')\n frame_step_.shape.assert_has_rank(0)\n frame_length = ops.convert_to_tensor(frame_length, name='frame_length')\n frame_length.shape.assert_has_rank(0)\n forward_window = forward_window_fn(frame_length, dtype=dtype)\n denom = math_ops.square(forward_window)\n overlaps = -(-frame_length // frame_step_)\n denom = array_ops.pad(denom, [(0, overlaps * frame_step_ - frame_length)])\n denom = array_ops.reshape(denom, [overlaps, frame_step_])\n denom = math_ops.reduce_sum(denom, 0, keepdims=True)\n denom = 
array_ops.tile(denom, [overlaps, 1])\n denom = array_ops.reshape(denom, [overlaps * frame_step_])\n return forward_window / denom[:frame_length]\n return inverse_stft_window_fn_inner", + "docstring": "Generates a window function that can be used in . Constructs a window that is equal to the forward window with a further pointwise amplitude correction. is equivalent to in the case where it would produce an exact inverse. See examples in documentation for usage. Args: frame_step: An integer scalar . The number of samples to step. forward_window_fn: window_fn used in the forward transform, . name: An optional name for the operation. Returns: A callable that takes a window length and a keyword argument and returns a of samples in the provided datatype. The returned window is suitable for reconstructing original waveform in inverse_stft.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\spectral_ops.py", + "ast_data": "FunctionDef name:inverse_stft_window_fn arg:frame_step arg:forward_window_fn arg:name arguments arg arg arg FunctionDef name:inverse_stft_window_fn_inner arg:frame_length arg:dtype arguments arg arg With Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "release_zoom", + "source_code": "def release_zoom(self, event):\n if self._zoom_info is None:\n return\n self.canvas.mpl_disconnect(self._zoom_info.cid)\n self.remove_rubberband()\n start_x, start_y = self._zoom_info.start_xy\n direction = 'in' if self._zoom_info.button == 1 else 'out'\n key = event.key\n if self._zoom_info.cbar == 'horizontal':\n key = 'x'\n elif self._zoom_info.cbar == 'vertical':\n key = 'y'\n if abs(event.x - start_x) < 5 and key != 'y' or (abs(event.y - start_y) < 5 and key != 'x'):\n self._cleanup_post_zoom()\n return\n for i, ax in enumerate(self._zoom_info.axes):\n twinx = any((ax.get_shared_x_axes().joined(ax, prev) for prev in self._zoom_info.axes[:i]))\n twiny = any((ax.get_shared_y_axes().joined(ax, prev) for prev in self._zoom_info.axes[:i]))\n ax._set_view_from_bbox((start_x, start_y, event.x, event.y), direction, key, twinx, twiny)\n self._cleanup_post_zoom()\n self.push_current()", + "docstring": "Callback for mouse button release in zoom to rect mode.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:release_zoom arg:self arg:event arguments arg arg If Compare Return return:no Call Call Assign Assign Compare Assign If Compare Assign If Compare Assign If BoolOp BoolOp Compare Call Compare BoolOp Compare Call Compare Call Return return:no For Call Assign Call Call Call Assign Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "annotate_orig_fx_with_snodes", + "source_code": "def annotate_orig_fx_with_snodes(gm: torch.fx.GraphModule, snodes: SchedulerNodeList) -> None:\n node_name_to_buf_name: dict[str, str] = {}\n update_orig_fx_node_name_to_buf_name(snodes, node_name_to_buf_name)\n if node_name_to_buf_name is None:\n return\n node_name_to_buf_meta = get_node_name_to_buf_meta(node_name_to_buf_name)\n for node in gm.graph.nodes:\n if node.name in node_name_to_buf_meta:\n node.meta['buf_meta'] = node_name_to_buf_meta.get(node.name)", + "docstring": "Creates a FX Graph from a list of SchedulerNode objects.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\debug.py", + "ast_data": "FunctionDef 
name:annotate_orig_fx_with_snodes arg:gm arg:snodes arguments arg arg Call If Compare Return return:no Assign Call For If Compare Assign Call" + }, + { + "library": "scikit-learn", + "name": "get_chunk_n_rows", + "source_code": "def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):\n if working_memory is None:\n working_memory = get_config()['working_memory']\n chunk_n_rows = int(working_memory * 2 ** 20 // row_bytes)\n if max_n_rows is not None:\n chunk_n_rows = min(chunk_n_rows, max_n_rows)\n if chunk_n_rows < 1:\n warnings.warn('Could not adhere to working_memory config. Currently %.0fMiB, %.0fMiB required.' % (working_memory, np.ceil(row_bytes * 2 ** (-20))))\n chunk_n_rows = 1\n return chunk_n_rows", + "docstring": "Calculate how many rows can be processed within . Parameters ---------- row_bytes : int The expected number of bytes of memory that will be consumed during the processing of each row. max_n_rows : int, default=None The maximum return value. working_memory : int or float, default=None The number of rows to fit inside this number of MiB will be returned. When None (default), the value of `working_memoryrow_bytes exceeds MiB.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_chunking.py", + "ast_data": "FunctionDef name:get_chunk_n_rows arg:row_bytes arguments arg arg arg If Compare Assign Call Assign Call If Compare Assign Call If Compare Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_device_policy", + "source_code": "@tf_export('config.experimental.set_device_policy')\ndef set_device_policy(device_policy):\n if device_policy == 'silent':\n context.context().device_policy = context.DEVICE_PLACEMENT_SILENT\n elif device_policy == 'silent_for_int32':\n context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32\n elif device_policy == 'warn':\n context.context().device_policy = context.DEVICE_PLACEMENT_WARN\n elif device_policy == 'explicit':\n context.context().device_policy = context.DEVICE_PLACEMENT_EXPLICIT\n elif device_policy is None:\n context.context().device_policy = None\n else:\n raise ValueError(f'Invalid argument `device_policy`: {device_policy!r}. Please refer to https://www.tensorflow.org/api_docs/python/tf/config/experimental/set_device_policy for valid `device_policy` arguments.')", + "docstring": "Sets the current thread device policy. The device policy controls how operations requiring inputs on a specific device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1). When using the default, an appropriate policy will be picked automatically. The default policy may change over time. This function only sets the device policy for the current thread. Any subsequently started thread will again use the default policy. Args: device_policy: A device policy. Valid values: - None: Switch to a system default. - 'warn': Copies the tensors which are not on the right device and logs a warning. - 'explicit': Raises an error if the placement is not as required. - 'silent': Silently copies the tensors. Note that this may hide performance problems as there is no notification provided when operations are blocked on the tensor being copied between devices. - 'silent_for_int32': silently copies tensors, raising errors on the other ones. 
Raises: ValueError: If an invalid is passed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:set_device_policy arg:device_policy arguments arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "_ConversionContext", + "source_code": "class _ConversionContext(enum.Enum):\n VALUE = 1\n SPEC = 2\n DEFAULT = 3", + "docstring": "Enum to indicate what kind of value is being converted. Used by and and their helper methods.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py", + "ast_data": "ClassDef name:_ConversionContext Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "scatter_update", + "source_code": "def scatter_update(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n return gen_state_ops.scatter_update(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", + "docstring": "Assigns to this variable. Args: sparse_delta: to be assigned to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered assignment has completed. Raises: TypeError: if is not an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:scatter_update arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "ward", + "source_code": "@lazy_cython\ndef ward(y):\n return linkage(y, method='ward', metric='euclidean')", + "docstring": "Perform Ward's linkage on a condensed distance matrix. See for more information on the return structure and algorithm. The following are common calling conventions: 1. 
`linkagescipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.", + "type": "function", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:ward arg:y arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_dtype_to_na_value", + "source_code": "def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):\n if isinstance(dtype, ExtensionDtype):\n return dtype.na_value\n elif dtype.kind in 'mM':\n return dtype.type('NaT')\n elif dtype.kind in 'fc':\n return dtype.type('NaN')\n elif dtype.kind == 'b':\n return None\n elif dtype.kind in 'iu':\n if not has_none_blocks:\n return None\n return np.nan\n elif dtype.kind == 'O':\n return np.nan\n raise NotImplementedError", + "docstring": "Find the NA value to go with this dtype.", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\concat.py", + "ast_data": "FunctionDef name:_dtype_to_na_value arg:dtype arg:has_none_blocks arguments arg arg If Call Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:no If Compare If Return return:no Return return:yes If Compare Return return:yes Raise" + }, + { + "library": "django", + "name": "TruncWeek", + "source_code": "class TruncWeek(TruncBase):\n kind = 'week'", + "docstring": "Truncate to midnight on the Monday of the week.", + "type": "class", + "file_path": "django\\django\\db\\models\\functions\\datetime.py", + "ast_data": "ClassDef name:TruncWeek Assign" + }, + { + "library": "tensorflow", + "name": "_create_polynomial", + "source_code": "def _create_polynomial(var, coeffs):\n coeffs = np.array(coeffs, var.dtype.as_numpy_dtype)\n if not coeffs.size:\n return array_ops.zeros_like(var)\n return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var", + "docstring": "Compute n_th order polynomial via Horner's method.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py", + "ast_data": "FunctionDef name:_create_polynomial arg:var arg:coeffs arguments arg arg Assign Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "TverskyLoss", + "source_code": "class TverskyLoss(nn.Module):\n\n def __init__(self, alpha: float, beta: float, eps: float=1e-08, ignore_index: Optional[int]=-100) -> None:\n super().__init__()\n self.alpha: float = alpha\n self.beta: float = beta\n self.eps: float = eps\n self.ignore_index: Optional[int] = ignore_index\n\n def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n return tversky_loss(pred, target, self.alpha, self.beta, self.eps, self.ignore_index)", + "docstring": "Criterion that computes Tversky Coefficient loss. According to :cite:, we compute the Tversky Coefficient as follows: .. math:: \\text{S}(P, G, \\alpha; \\beta) = \\frac{|PG|}{|PG| + \\alpha |P \\setminus G| + \\beta |G \\setminus P|} Where: - :math: and :math: are the predicted and ground truth binary labels. - :math: and :math: control the magnitude of the penalties for FPs and FNs, respectively. Note: - :math: => dice coeff - :math: => tanimoto coeff - :math: => F beta coeff Args: alpha: the first coefficient in the denominator. beta: the second coefficient in the denominator. eps: scalar for numerical stability. ignore_index: labels with this value are ignored in the loss computation. Shape: - Pred: :math: where C = number of classes. 
- Target: :math: where each value is :math:. Examples: >>> N = 5 # num_classes >>> criterion = TverskyLoss(alpha=0.5, beta=0.5) >>> pred = torch.randn(1, N, 3, 5, requires_grad=True) >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N) >>> output = criterion(pred, target) >>> output.backward()", + "type": "class", + "file_path": "kornia\\kornia\\losses\\tversky.py", + "ast_data": "ClassDef name:TverskyLoss FunctionDef name:__init__ arg:self arg:alpha arg:beta arg:eps arg:ignore_index arguments arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:pred arg:target arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "Tensor_summary", + "source_code": "def Tensor_summary(valobj: Any, internal_dict: Any, options: Any) -> str:\n with DisableBreakpoints():\n target = get_target()\n tensor = valobj.GetName()\n result = target.EvaluateExpression(f'torch::gdb::tensor_repr({tensor})')\n str_result = str(result)\n target.EvaluateExpression(f'(void)free({result.GetValue()})')\n str_result = '\\n' + str_result[str_result.find('tensor'):-1]\n return str_result", + "docstring": "Print a human readable representation of the given at::Tensor. at::Tensor instances do not have a C++ implementation of a repr method: in pytorch, this is done by pure-Python code. As such, print internally creates a Python wrapper for the given tensor and call repr() on it. Usage: print self", + "type": "function", + "file_path": "pytorch\\tools\\lldb\\pytorch_lldb.py", + "ast_data": "FunctionDef name:Tensor_summary arg:valobj arg:internal_dict arg:options arguments arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "open_spider", + "source_code": "@deferred_f_from_coro_f\nasync def open_spider(self, spider: Spider) -> None:\n self.slot = Slot(self.crawler.settings.getint('SCRAPER_SLOT_MAX_ACTIVE_SIZE'))\n await maybe_deferred_to_future(self.itemproc.open_spider(spider))", + "docstring": "Open the given spider for scraping and allocate resources for it", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\scraper.py", + "ast_data": "AsyncFunctionDef name:open_spider arg:self arg:spider arguments arg arg Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "EmbeddingFeature", + "source_code": "class EmbeddingFeature(enum.Enum):\n UNSUPPORTED = 'UNSUPPORTED'\n V1 = 'V1'\n V2 = 'V2'", + "docstring": "Embedding feature flag strings. UNSUPPORTED: No embedding lookup accelerator available on the tpu. V1: Embedding lookup accelerator V1. The embedding lookup operation can only be placed at the beginning of computation. Only one instance of embedding lookup layer is allowed. V2: Embedding lookup accelerator V2. The embedding lookup operation can be placed anywhere of the computation. 
Multiple instances of embedding lookup layer is allowed.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_hardware_feature.py", + "ast_data": "ClassDef name:EmbeddingFeature Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "BorgTPUTerminationConfig", + "source_code": "class BorgTPUTerminationConfig(TerminationConfig):\n\n def __init__(self, termination_watcher_fn=None, exit_fn=None, grace_period=None, save_fn=None):\n self.termination_watcher_fn = termination_watcher_fn\n self.exit_fn = exit_fn or failure_handling_util.default_tpu_exit_fn\n self.grace_period = grace_period or 0\n self.save_fn = save_fn", + "docstring": "Configurations for Borg.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py", + "ast_data": "ClassDef name:BorgTPUTerminationConfig FunctionDef name:__init__ arg:self arg:termination_watcher_fn arg:exit_fn arg:grace_period arg:save_fn arguments arg arg arg arg arg Assign Assign BoolOp Assign BoolOp Assign" + }, + { + "library": "tensorflow", + "name": "_resolve_task_configuration", + "source_code": "def _resolve_task_configuration(self):\n hostlist = self._resolve_hostlist()\n tasks_per_node = expand_tasks_per_node(_get_slurm_var('STEP_TASKS_PER_NODE'))\n return {host: num_tasks for host, num_tasks in zip(hostlist, tasks_per_node)}", + "docstring": "Creates a mapping of hostnames to the number of tasks allocated on it. Reads the SLURM environment to determine the nodes involved in the current job step and number of tasks running on each node. Returns a dictionary mapping each hostname to the number of tasks.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py", + "ast_data": "FunctionDef name:_resolve_task_configuration arg:self arguments arg Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_subsystem", + "source_code": "@classmethod\ndef get_subsystem(cls) -> Optional[str]:\n if (val := get_env_val('TORCH_BISECT_SUBSYSTEM')):\n return val\n file_path = os.path.join(cls.get_dir(), 'bisect_status.txt')\n lines = cls.read_lines_from_file(file_path)\n for line in lines:\n if line.startswith('subsystem='):\n out = line.strip().split('=')[1]\n return out if out else None\n return None", + "docstring": "Returns the active subsystem, if any", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\compiler_bisector.py", + "ast_data": "FunctionDef name:get_subsystem arg:cls arguments arg If Call Return return:yes Assign Call Call Assign Call For If Call Assign Call Call Return return:yes Return return:no" + }, + { + "library": "scipy", + "name": "shift", + "source_code": "def shift(x, a, period=None, _cache=_cache):\n if isinstance(_cache, threading.local):\n if not hasattr(_cache, 'shift_cache'):\n _cache.shift_cache = {}\n _cache = _cache.shift_cache\n tmp = asarray(x)\n if iscomplexobj(tmp):\n return shift(tmp.real, a, period, _cache) + 1j * shift(tmp.imag, a, period, _cache)\n if period is not None:\n a = a * 2 * pi / period\n n = len(x)\n omega = _cache.get((n, a))\n if omega is None:\n if len(_cache) > 20:\n while _cache:\n _cache.popitem()\n\n def kernel_real(k, a=a):\n return cos(a * k)\n\n def kernel_imag(k, a=a):\n return sin(a * k)\n omega_real = convolve.init_convolution_kernel(n, kernel_real, d=0, zero_nyquist=0)\n omega_imag = convolve.init_convolution_kernel(n, kernel_imag, d=1, zero_nyquist=0)\n _cache[n, a] = (omega_real, 
omega_imag)\n else:\n omega_real, omega_imag = omega\n overwrite_x = _datacopied(tmp, x)\n return convolve.convolve_z(tmp, omega_real, omega_imag, overwrite_x=overwrite_x)", + "docstring": "Shift periodic sequence x by a: y(u) = x(u+a). If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_f Parameters ---------- x : array_like The array to take the pseudo-derivative from. a : float Defines the parameters of the sinh/sinh pseudo-differential period : float, optional The period of the sequences x and y. Default period is ``.", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py", + "ast_data": "FunctionDef name:shift arg:x arg:a arg:period arg:_cache arguments arg arg arg arg If Call If Call Assign Assign Assign Call If Call Return return:yes Call Call If Compare Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel_real arg:k arg:a arguments arg arg Return return:yes Call FunctionDef name:kernel_imag arg:k arg:a arguments arg arg Return return:yes Call Assign Call Assign Call Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "array_function_dispatch", + "source_code": "def array_function_dispatch(dispatcher=None, module=None, verify=True, docs_from_dispatcher=False):\n\n def decorator(implementation):\n if verify:\n if dispatcher is not None:\n verify_matching_signatures(implementation, dispatcher)\n else:\n co = implementation.__code__\n last_arg = co.co_argcount + co.co_kwonlyargcount - 1\n last_arg = co.co_varnames[last_arg]\n if last_arg != 'like' or co.co_kwonlyargcount == 0:\n raise RuntimeError(f'__array_function__ expects `like=` to be the last argument and a keyword-only argument. {implementation} does not seem to comply.')\n if docs_from_dispatcher:\n add_docstring(implementation, dispatcher.__doc__)\n public_api = _ArrayFunctionDispatcher(dispatcher, implementation)\n public_api = functools.wraps(implementation)(public_api)\n if module is not None:\n public_api.__module__ = module\n ARRAY_FUNCTIONS.add(public_api)\n return public_api\n return decorator", + "docstring": "Decorator for adding dispatch with the __array_function__ protocol. See NEP-18 for example usage. Parameters ---------- dispatcher : callable or None Function that when called like `Nonelike=like=like`. docs_from_dispatcher : bool, optional If True, copy docs from the dispatcher function onto the dispatched function, rather than from the implementation. This is useful for functions defined in C, which otherwise don't have docstrings. Returns ------- Function suitable for decorating the implementation of a NumPy function.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\overrides.py", + "ast_data": "FunctionDef name:array_function_dispatch arg:dispatcher arg:module arg:verify arg:docs_from_dispatcher arguments arg arg arg arg FunctionDef name:decorator arg:implementation arguments arg If If Compare Call Assign Assign Assign If BoolOp Compare Compare Raise Call If Call Assign Call Assign Call Call If Compare Assign Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "floordiv", + "source_code": "def floordiv(self, x0: T, x1: T) -> T:\n raise NotImplementedError", + "docstring": "Python-style floor division between integers only. Computes the true division of two numbers and floors the result. 
If you want floor division for floats, do regular truediv and floor the result.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:floordiv arg:self arg:x0 arg:x1 arguments arg arg arg Raise" + }, + { + "library": "tensorflow", + "name": "set_optimizer_jit", + "source_code": "@tf_export('config.optimizer.set_jit')\n@deprecation.deprecated_arg_values(None, '`True` setting is deprecated, use `autoclustering` instead.', warn_once=True, jit_config=True)\ndef set_optimizer_jit(enabled: Union[bool, str]):\n autoclustering_enabled = enabled in (True, 'autoclustering')\n context.context().optimizer_jit = autoclustering_enabled", + "docstring": "Configure JIT compilation. Note: compilation is only applied to code that is compiled into a graph (in TF2 that's only a code inside ). Args: enabled: JIT compilation configuration. Possible values: - ( is a deprecated alias): perform [autoclustering]( (automatically identify and compile clusters of nodes) on all graphs using [XLA]( - : do not automatically compile any graphs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:set_optimizer_jit arg:enabled arguments arg Assign Compare Assign Call Call Call" + }, + { + "library": "matplotlib", + "name": "update_bbox_position_size", + "source_code": "def update_bbox_position_size(self, renderer):\n if self._bbox_patch:\n posx = float(self.convert_xunits(self._x))\n posy = float(self.convert_yunits(self._y))\n posx, posy = self.get_transform().transform((posx, posy))\n x_box, y_box, w_box, h_box = _get_textbox(self, renderer)\n self._bbox_patch.set_bounds(0.0, 0.0, w_box, h_box)\n self._bbox_patch.set_transform(Affine2D().rotate_deg(self.get_rotation()).translate(posx + x_box, posy + y_box))\n fontsize_in_pixel = renderer.points_to_pixels(self.get_size())\n self._bbox_patch.set_mutation_scale(fontsize_in_pixel)", + "docstring": "Update the location and the size of the bbox. This method should be used when the position and size of the bbox needs to be updated before actually drawing the bbox.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:update_bbox_position_size arg:self arg:renderer arguments arg arg If Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call Call Call Call Call Assign Call Call Call" + }, + { + "library": "matplotlib", + "name": "contains_point", + "source_code": "def contains_point(self, point, radius=None):\n radius = self._process_radius(radius)\n return self.get_path().contains_point(point, self.get_transform(), radius)", + "docstring": "Return whether the given point is inside the patch. Parameters ---------- point : (float, float) The point (x, y) to check, in target coordinates of `.Patch.get_transform.Path.contains_pointNone.Artist.get_picker` is a number, the default is that value. This is so that picking works as expected. - Otherwise if the edge color has a non-zero alpha, the default is half of the linewidth. This is so that all the colored pixels are \"in\" the patch. - Finally, if the edge has 0 alpha, the default is 0. This is so that patches without a stroked edge do not have points outside of the filled region report as \"in\" due to an invisible edge. Returns ------- bool Notes ----- The proper use of this method depends on the transform of the patch. Isolated patches do not have a transform. 
In this case, the patch creation coordinates and the point coordinates match. The following example checks that the center of a circle is within the circle >>> center = 0, 0 >>> c = Circle(center, radius=1) >>> c.contains_point(center) True The convention of checking against the transformed patch stems from the fact that this method is predominantly used to check if display coordinates (e.g. from mouse events) are within the patch. If you want to do the above check with data coordinates, you have to properly transform them first: >>> center = 0, 0 >>> c = Circle(center, radius=3) >>> plt.gca().add_patch(c) >>> transformed_interior_point = c.get_data_transform().transform((0, 2)) >>> c.contains_point(transformed_interior_point) True", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:contains_point arg:self arg:point arg:radius arguments arg arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_writer", + "source_code": "def get_writer(self):\n if not self._writer:\n self._writer = debug_events_writer.DebugEventsWriter(self._dump_root, self._tfdbg_run_id, circular_buffer_size=self._circular_buffer_size)\n return self._writer", + "docstring": "Get the debug events writer for the currently configured dump root.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py", + "ast_data": "FunctionDef name:get_writer arg:self arguments arg If Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "to_discrete", + "source_code": "def to_discrete(self, dt, method='zoh', alpha=None):\n raise NotImplementedError('to_discrete is not implemented for this system class.')", + "docstring": "Return a discretized version of the current system. Parameters: See for details. 
Returns ------- sys: instance of", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:to_discrete arg:self arg:dt arg:method arg:alpha arguments arg arg arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "replace_dyn_with_fresh_var", + "source_code": "def replace_dyn_with_fresh_var(self, typ):\n if typ == Dyn:\n new_symbol = Var(next(self.symbol_iter))\n return new_symbol\n elif isinstance(typ, TensorType):\n new_args = [self.replace_dyn_with_fresh_var(a) for a in typ.__args__]\n return TensorType(tuple(new_args))\n elif isinstance(typ, list):\n return [self.replace_dyn_with_fresh_var(t) for t in typ]\n elif isinstance(typ, tuple):\n return (self.replace_dyn_with_fresh_var(t) for t in typ)\n else:\n return typ", + "docstring": "Replace all unknown types with fresh type variables.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", + "ast_data": "FunctionDef name:replace_dyn_with_fresh_var arg:self arg:typ arguments arg arg If Compare Assign Call Call Return return:yes If Call Assign Call Return return:yes Call Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_call_hook_before_run", + "source_code": "def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict, options):\n hook_feeds = {}\n for hook in self._hooks:\n request = hook.before_run(run_context)\n if request is not None:\n if request.fetches is not None:\n fetch_dict[hook] = request.fetches\n if request.feed_dict:\n self._raise_if_feeds_intersects(hook_feeds, request.feed_dict, 'Same tensor is fed by two hooks.')\n hook_feeds.update(request.feed_dict)\n if request.options:\n self._merge_run_options(options, request.options)\n if not hook_feeds:\n return user_feed_dict\n if not user_feed_dict:\n return hook_feeds\n self._raise_if_feeds_intersects(user_feed_dict, hook_feeds, 'Same tensor is fed by a SessionRunHook and user.')\n hook_feeds.update(user_feed_dict)\n return hook_feeds", + "docstring": "Calls hooks.before_run and handles requests from hooks.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:_call_hook_before_run arg:self arg:run_context arg:fetch_dict arg:user_feed_dict arg:options arguments arg arg arg arg arg Assign For Assign Call If Compare If Compare Assign If Call Call If Call If Return return:yes If Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "visit_Slice", + "source_code": "def visit_Slice(self, node, **kwargs) -> slice:\n lower = node.lower\n if lower is not None:\n lower = self.visit(lower).value\n upper = node.upper\n if upper is not None:\n upper = self.visit(upper).value\n step = node.step\n if step is not None:\n step = self.visit(step).value\n return slice(lower, upper, step)", + "docstring": "df.index[slice(4,6)]", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\expr.py", + "ast_data": "FunctionDef name:visit_Slice arg:self arg:node arguments arg arg arg Assign If Compare Assign Call Assign If Compare Assign Call Assign If Compare Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_clip_rectangle", + "source_code": "def set_clip_rectangle(self, rectangle):\n self._cliprect = rectangle", + "docstring": "Set the clip rectangle to a or None.", + "type": "method", + "file_path": 
"matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:set_clip_rectangle arg:self arg:rectangle arguments arg arg Assign" + }, + { + "library": "scipy", + "name": "Shubert01", + "source_code": "class Shubert01(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.global_optimum = [[-7.0835, 4.858]]\n self.fglob = -186.7309\n\n def fun(self, x, *args):\n self.nfev += 1\n j = atleast_2d(arange(1, 6)).T\n y = j * cos((j + 1) * x + j)\n return prod(sum(y, axis=0))", + "docstring": "Shubert 1 objective function. This class defines the Shubert 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Shubert01}}(x) = \\prod_{i=1}^{n}\\left(\\sum_{j=1}^{5} cos(j+1)x_i+j \\right ) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: (and many others). .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Jamil#133 is missing a prefactor of j before the cos function.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:Shubert01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "create", + "source_code": "@staticmethod\ndef create(rpc_layer, address):\n if rpc_layer != 'grpc':\n raise ValueError('Only GRPC backend is supported at the moment.')\n return GrpcServer(address=address)", + "docstring": "Create TF RPC server at given address. Args: rpc_layer: Communication layer between client and server. Only \"grpc\" rpc layer is supported at the moment. address: Address where RPC server is hosted. Returns: An instance of class. Raises: A ValueError if rpc_layer other than \"grpc\" is used. Only GRPC is supported at the moment. Example usage: >>> import portpicker >>> @tf.function(input_signature=[ ... tf.TensorSpec([], tf.int32), ... tf.TensorSpec([], tf.int32)]) ... def remote_fn(a, b): ... return tf.add(a, b) >>> port = portpicker.pick_unused_port() >>> address = \"localhost:{}\".format(port) >>> server = tf.distribute.experimental.rpc.Server.create(\"grpc\", address) >>> server.register(\"addition\", remote_fn) >>> server.start()", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py", + "ast_data": "FunctionDef name:create arg:rpc_layer arg:address arguments arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype)", + "docstring": "Returns a tensor object initialized to random normal values (truncated). Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. 
If not specified, is used, which default to unless you configured it otherwise (via ) **kwargs: Additional keyword arguments.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "estimate_rms_residuals", + "source_code": "def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):\n x_middle = x[:-1] + 0.5 * h\n s = 0.5 * h * (3 / 7) ** 0.5\n x1 = x_middle + s\n x2 = x_middle - s\n y1 = sol(x1)\n y2 = sol(x2)\n y1_prime = sol(x1, 1)\n y2_prime = sol(x2, 1)\n f1 = fun(x1, y1, p)\n f2 = fun(x2, y2, p)\n r1 = y1_prime - f1\n r2 = y2_prime - f2\n r_middle /= 1 + np.abs(f_middle)\n r1 /= 1 + np.abs(f1)\n r2 /= 1 + np.abs(f2)\n r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)\n r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)\n r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)\n return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5", + "docstring": "Estimate rms values of collocation residuals using Lobatto quadrature. The residuals are defined as the difference between the derivatives of our solution and rhs of the ODE system. We use relative residuals, i.e., normalized by 1 + np.abs(f). RMS values are computed as sqrt from the normalized integrals of the squared relative residuals over each interval. Integrals are estimated using 5-point Lobatto quadrature [1]_, we use the fact that residuals at the mesh nodes are identically zero. In [2] they don't normalize integrals by interval lengths, which gives a higher rate of convergence of the residuals by the factor of h**0.5. I chose to do such normalization for an ease of interpretation of return values as RMS estimates. Returns ------- rms_res : ndarray, shape (m - 1,) Estimated rms values of the relative residuals over each interval. References ---------- .. [1] .. [2] J. Kierzenka, L. F. Shampine, \"A BVP Solver Based on Residual Control and the Maltab PSE\", ACM Trans. Math. Softw., Vol. 27, Number 3, pp. 299-316, 2001.", + "type": "function", + "file_path": "scipy\\scipy\\integrate\\_bvp.py", + "ast_data": "FunctionDef name:estimate_rms_residuals arg:fun arg:sol arg:x arg:h arg:p arg:r_middle arg:f_middle arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "onnxscript_op", + "source_code": "def onnxscript_op(self, onnx_fn, *raw_args: torch.Tensor | _C.Value, outputs: int=1, **kwargs):\n symbolic_name = f'{onnx_fn.opset.domain}::{onnx_fn.name}'\n opset_version = onnx_fn.opset.version\n registration.custom_onnx_symbolic(symbolic_name, opset_version)(onnx_fn)\n return _add_op(self, symbolic_name, *raw_args, outputs=outputs, **kwargs)", + "docstring": "Creates an ONNX operator from onnx-script function, taking \"raw_args\" as inputs and \"kwargs\" as attributes. onnx-script repository: Args: onnx_fn: ONNXFunction from onnx-script; An example can be found at raw_args: The inputs to the operator; usually provided as arguments to the definition. outputs: The number of outputs this operator returns. By default an operator is assumed to return a single output. 
If is greater than one, this functions returns a tuple of output , representing each output of the ONNX operator in order. kwargs: The attributes of the ONNX operator, whose keys are named according to the following convention: indicates the attribute with type . The valid type specifiers are (float), (int), (string) or (Tensor). An attribute specified with type float accepts either a single float, or a list of floats (e.g., you would say for a attribute that takes a list of integers). Returns: The value representing the single output of this operator (see the keyword argument for multi-return nodes).", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py", + "ast_data": "FunctionDef name:onnxscript_op arg:self arg:onnx_fn arguments arg arg arg arg arg Assign Assign Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "__init__", + "source_code": "def __init__(self, mbfunc, fillx=0, filly=0):\n super().__init__(mbfunc)\n self.fillx = fillx\n self.filly = filly\n ufunc_domain[mbfunc] = None\n ufunc_fills[mbfunc] = (fillx, filly)", + "docstring": "abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:mbfunc arg:fillx arg:filly arguments arg arg arg arg Call Call Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "get_signature_def", + "source_code": "def get_signature_def(meta_graph, signature_key):\n signature_def_map = meta_graph.signature_def\n signature_def_keys = set(signature_def_map.keys())\n logging.info('The given SavedModel MetaGraphDef contains SignatureDefs with the following keys: %s', signature_def_keys)\n if signature_key not in signature_def_keys:\n raise ValueError(\"No '{}' in the SavedModel's SignatureDefs. Possible values are '{}'.\".format(signature_key, ','.join(signature_def_keys)))\n return signature_def_map[signature_key]", + "docstring": "Get the signature def from meta_graph with given signature_key. Args: meta_graph: meta_graph_def. signature_key: signature_def in the meta_graph_def. Returns: The signature_def used for tflite conversion. Raises: ValueError: Given signature_key is not valid for this meta_graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\convert_saved_model.py", + "ast_data": "FunctionDef name:get_signature_def arg:meta_graph arg:signature_key arguments arg arg Assign Assign Call Call Call If Compare Raise Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "_transform_input", + "source_code": "def _transform_input(input: Tensor) -> Tensor:\n if not torch.is_tensor(input):\n raise TypeError(f'Input type is not a Tensor. Got {type(input)}')\n if len(input.shape) not in [2, 3, 4]:\n raise ValueError(f'Input size must have a shape of either (H, W), (C, H, W) or (*, C, H, W). Got {input.shape}')\n if len(input.shape) == 2:\n input = input.unsqueeze(0)\n if len(input.shape) == 3:\n input = input.unsqueeze(0)\n return input", + "docstring": "Reshape an input tensor to be (*, C, H, W). Accept either (H, W), (C, H, W) or (*, C, H, W). 
Args: input: Tensor Returns: Tensor", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py", + "ast_data": "FunctionDef name:_transform_input arg:input arguments arg If Call Raise Call Call If Compare Call Raise Call If Compare Call Assign Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "unique_all", + "source_code": "@array_function_dispatch(_unique_all_dispatcher)\ndef unique_all(x):\n result = unique(x, return_index=True, return_inverse=True, return_counts=True, equal_nan=False)\n return UniqueAllResult(*result)", + "docstring": "Find the unique elements of an array, and counts, inverse, and indices. This function is an Array API compatible alternative to:: np.unique(x, return_index=True, return_inverse=True, return_counts=True, equal_nan=False, sorted=False) but returns a namedtuple for easier access to each output. .. note:: This function currently always returns a sorted result, however, this could change in any NumPy minor release. Parameters ---------- x : array_like Input array. It will be flattened if it is not already 1-D. Returns ------- out : namedtuple The result containing: * values - The unique elements of an input array. * indices - The first occurring indices for each unique element. * inverse_indices - The indices from the set of unique elements that reconstruct . * counts - The corresponding counts for each unique element. See Also -------- unique : Find the unique elements of an array. Examples -------- >>> import numpy as np >>> x = [1, 1, 2] >>> uniq = np.unique_all(x) >>> uniq.values array([1, 2]) >>> uniq.indices array([0, 2]) >>> uniq.inverse_indices array([0, 0, 1]) >>> uniq.counts array([2, 1])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py", + "ast_data": "FunctionDef name:unique_all arg:x arguments arg Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n repr_buf = f'num_drop_channels={self.num_drop_channels}'\n return repr_buf", + "docstring": "Return a string representation of the object.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\channel_dropout.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes" + }, + { + "library": "kornia", + "name": "Orthographic", + "source_code": "class Orthographic(CameraModelBase):\n\n def __init__(self, image_size: ImageSize, params: Tensor) -> None:\n super().__init__(AffineTransform(), OrthographicProjection(), image_size, params)\n if params.shape[-1] != 4 or len(params.shape) > 2:\n raise ValueError('params must be of shape B, 4 for ORTHOGRAPHIC Camera')", + "docstring": "Orthographic Camera Model.", + "type": "class", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "ClassDef name:Orthographic FunctionDef name:__init__ arg:self arg:image_size arg:params arguments arg arg arg Call Call Call Call If BoolOp Compare Compare Call Raise Call" + }, + { + "library": "django", + "name": "divisibleby", + "source_code": "@register.filter(is_safe=False)\ndef divisibleby(value, arg):\n return int(value) % int(arg) == 0", + "docstring": "Return True if the value is divisible by the argument.", + "type": "function", + "file_path": "django\\django\\template\\defaultfilters.py", + "ast_data": "FunctionDef name:divisibleby arg:value arg:arg arguments arg arg Return return:yes Compare Call Call Call" + }, + { + "library": 
"tensorflow", + "name": "aggregate_tensors_or_indexed_slices", + "source_code": "def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):\n if any((isinstance(v, indexed_slices.IndexedSlices) for v in values)):\n return backprop_util.AggregateIndexedSlicesGradients(values)\n else:\n return accumulation_fn(values)", + "docstring": "Aggregate tensors using and IndexedSlices via concat.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py", + "ast_data": "FunctionDef name:aggregate_tensors_or_indexed_slices arg:values arg:accumulation_fn arguments arg arg If Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "decision_function", + "source_code": "def decision_function(self, X):\n check_is_fitted(self)\n X = validate_data(self, X, accept_sparse=True, ensure_all_finite=False, reset=False)\n indices = self.pairwise_indices_\n if indices is None:\n Xs = [X] * len(self.estimators_)\n else:\n Xs = [X[:, idx] for idx in indices]\n predictions = np.vstack([est.predict(Xi) for est, Xi in zip(self.estimators_, Xs)]).T\n confidences = np.vstack([_predict_binary(est, Xi) for est, Xi in zip(self.estimators_, Xs)]).T\n Y = _ovr_decision_function(predictions, confidences, len(self.classes_))\n if self.n_classes_ == 2:\n return Y[:, 1]\n return Y", + "docstring": "Decision function for the OneVsOneClassifier. The decision values for the samples are computed by adding the normalized sum of pair-wise classification confidence levels to the votes in order to disambiguate between the decision values when the votes for all the classes are equal leading to a tie. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- Y : array-like of shape (n_samples, n_classes) or (n_samples,) Result of calling on the final estimator. .. versionchanged:: 0.19 output shape changed to `` to conform to scikit-learn conventions for binary classification.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multiclass.py", + "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Assign Call Assign If Compare Assign Call Assign Assign Call Call Call Assign Call Call Call Assign Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "get_fieldstructure", + "source_code": "def get_fieldstructure(adtype, lastname=None, parents=None):\n if parents is None:\n parents = {}\n names = adtype.names\n for name in names:\n current = adtype[name]\n if current.names is not None:\n if lastname:\n parents[name] = [lastname]\n else:\n parents[name] = []\n parents.update(get_fieldstructure(current, name, parents))\n else:\n lastparent = list(parents.get(lastname, []) or [])\n if lastparent:\n lastparent.append(lastname)\n elif lastname:\n lastparent = [lastname]\n parents[name] = lastparent or []\n return parents", + "docstring": "Returns a dictionary with fields indexing lists of their parent fields. This function is used to simplify access to fields nested in other fields. Parameters ---------- adtype : np.dtype Input datatype lastname : optional Last processed field name (used internally during recursion). parents : dictionary Dictionary of parent fields (used internally during recursion). Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('A', int), ... ('B', [('BA', int), ... 
('BB', [('BBA', int), ('BBB', int)])])]) >>> rfn.get_fieldstructure(ndtype) ... # XXX: possible regression, order of BBA and BBB is swapped {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}", + "type": "function", + "file_path": "numpy\\numpy\\lib\\recfunctions.py", + "ast_data": "FunctionDef name:get_fieldstructure arg:adtype arg:lastname arg:parents arguments arg arg arg If Compare Assign Assign For Assign If Compare If Assign Assign Call Call Assign Call BoolOp Call If Call If Assign Assign BoolOp Return return:yes" + }, + { + "library": "pytorch", + "name": "_start_workers", + "source_code": "@abc.abstractmethod\ndef _start_workers(self, worker_group: WorkerGroup) -> dict[int, Any]:\n raise NotImplementedError", + "docstring": "Start ``.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py", + "ast_data": "FunctionDef name:_start_workers arg:self arg:worker_group arguments arg arg Raise" + }, + { + "library": "authlib", + "name": "invalid_error_characters", + "source_code": "def invalid_error_characters(text: str) -> list[str]:\n valid_ranges = [(32, 33), (35, 91), (93, 126)]\n return [char for char in set(text) if not any((start <= ord(char) <= end for start, end in valid_ranges))]", + "docstring": "Check whether the string only contains characters from the restricted ASCII set defined in RFC6749 for errors.", + "type": "function", + "file_path": "authlib\\authlib\\oauth2\\base.py", + "ast_data": "FunctionDef name:invalid_error_characters arg:text arguments arg Assign Return return:yes Call Call Compare Call" + }, + { + "library": "scikit-learn", + "name": "_plot_ice_lines", + "source_code": "def _plot_ice_lines(self, preds, feature_values, n_ice_to_plot, ax, pd_plot_idx, n_total_lines_by_plot, individual_line_kw):\n rng = check_random_state(self.random_state)\n ice_lines_idx = rng.choice(preds.shape[0], n_ice_to_plot, replace=False)\n ice_lines_subsampled = preds[ice_lines_idx, :]\n for ice_idx, ice in enumerate(ice_lines_subsampled):\n line_idx = np.unravel_index(pd_plot_idx * n_total_lines_by_plot + ice_idx, self.lines_.shape)\n self.lines_[line_idx] = ax.plot(feature_values, ice.ravel(), **individual_line_kw)[0]", + "docstring": "Plot the ICE lines. Parameters ---------- preds : ndarray of shape (n_instances, n_grid_points) The predictions computed for all points of for a given feature for all samples in . feature_values : ndarray of shape (n_grid_points,) The feature values for which the predictions have been computed. n_ice_to_plot : int The number of ICE lines to plot. ax : Matplotlib axes The axis on which to plot the ICE lines. pd_plot_idx : int The sequential index of the plot. It will be unraveled to find the matching 2D position in the grid layout. n_total_lines_by_plot : int The total number of lines expected to be plot on the axis. 
individual_line_kw : dict Dict with keywords passed when plotting the ICE lines.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\inspection\\_plot\\partial_dependence.py", + "ast_data": "FunctionDef name:_plot_ice_lines arg:self arg:preds arg:feature_values arg:n_ice_to_plot arg:ax arg:pd_plot_idx arg:n_total_lines_by_plot arg:individual_line_kw arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign For Call Assign Call Assign Call Call" + }, + { + "library": "django", + "name": "should_skip_detecting_model", + "source_code": "def should_skip_detecting_model(migration, model):\n return model._meta.proxy or not model._meta.managed or (not router.allow_migrate(self.connection.alias, migration.app_label, model_name=model._meta.model_name))", + "docstring": "No need to detect tables for proxy models, unmanaged models, or models that can't be migrated on the current database.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\executor.py", + "ast_data": "FunctionDef name:should_skip_detecting_model arg:migration arg:model arguments arg arg Return return:yes BoolOp Call" + }, + { + "library": "pytorch", + "name": "set_post_optim_event", + "source_code": "def set_post_optim_event(self, event: torch.Event) -> None:\n self._get_fsdp_state()._state_ctx.post_optim_event = event", + "docstring": "Sets a post-optimizer-step event for the root FSDP module to wait the all-gather streams on. By default, the root FSDP module waits the all-gather streams on the current stream to ensure that the optimizer step has finished before all-gathering. However, this may introduce false dependencies if there is unrelated computation after the optimizer step. This API allows the user to provide their own event to wait on. After the root waits on the event, the event is discarded, so this API should be called with a new event each iteration. 
Args: event (torch.Event): Event recorded after the optimizer step to wait all-gather streams on.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py", + "ast_data": "FunctionDef name:set_post_optim_event arg:self arg:event arguments arg arg Assign Call" + }, + { + "library": "tensorflow", + "name": "_write_graph_section", + "source_code": "def _write_graph_section(self, graph_order):\n self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_GRAPH))\n self._write_report('%s %s\\n' % (_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED, not graph_order.contains_cycle))\n l = list(graph_order.topological_order_or_cycle)\n for i in range(0, len(l)):\n self._write_report('%d \"%s\"\\n' % (i, l[i].name))\n self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_GRAPH))", + "docstring": "Writes the graph section of the report.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py", + "ast_data": "FunctionDef name:_write_graph_section arg:self arg:graph_order arguments arg arg Call Call Assign Call For Call Call Call Call" + }, + { + "library": "django", + "name": "add_item", + "source_code": "def add_item(self, title, link, description, author_email=None, author_name=None, author_link=None, pubdate=None, comments=None, unique_id=None, unique_id_is_permalink=None, categories=(), item_copyright=None, ttl=None, updateddate=None, enclosures=None, **kwargs):\n\n def to_str(s):\n return str(s) if s is not None else s\n categories = categories and [to_str(c) for c in categories]\n self.items.append({'title': to_str(title), 'link': iri_to_uri(link), 'description': to_str(description), 'author_email': to_str(author_email), 'author_name': to_str(author_name), 'author_link': iri_to_uri(author_link), 'pubdate': pubdate, 'updateddate': updateddate, 'comments': to_str(comments), 'unique_id': to_str(unique_id), 'unique_id_is_permalink': unique_id_is_permalink, 'enclosures': enclosures or (), 'categories': categories or (), 'item_copyright': to_str(item_copyright), 'ttl': to_str(ttl), **kwargs})", + "docstring": "Add an item to the feed. 
All args are expected to be strings except pubdate and updateddate, which are datetime.datetime objects, and enclosures, which is an iterable of instances of the Enclosure class.", + "type": "method", + "file_path": "django\\django\\utils\\feedgenerator.py", + "ast_data": "FunctionDef name:add_item arg:self arg:title arg:link arg:description arg:author_email arg:author_name arg:author_link arg:pubdate arg:comments arg:unique_id arg:unique_id_is_permalink arg:categories arg:item_copyright arg:ttl arg:updateddate arg:enclosures arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg FunctionDef name:to_str arg:s arguments arg Return return:yes Compare Call Assign BoolOp Call Call Call Call Call Call Call Call Call Call BoolOp BoolOp Call Call" + }, + { + "library": "scipy", + "name": "time_cdist", + "source_code": "def time_cdist(self, num_points, metric):\n distance.cdist(self.points, self.points, self.metric, w=self.weights, **self.kwargs)", + "docstring": "Time scipy.spatial.distance.cdist for weighted distance metrics.", + "type": "method", + "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py", + "ast_data": "FunctionDef name:time_cdist arg:self arg:num_points arg:metric arguments arg arg arg Call" + }, + { + "library": "scikit-learn", + "name": "PositiveSpectrumWarning", + "source_code": "class PositiveSpectrumWarning(UserWarning):\n pass", + "docstring": "Warning raised when the eigenvalues of a PSD matrix have issues This warning is typically raised by `` when the eigenvalues of a positive semidefinite (PSD) matrix such as a gram matrix (kernel) present significant negative eigenvalues, or bad conditioning i.e. very small non-zero eigenvalues compared to the largest eigenvalue. .. versionadded:: 0.22", + "type": "class", + "file_path": "scikit-learn\\sklearn\\exceptions.py", + "ast_data": "ClassDef name:PositiveSpectrumWarning" + }, + { + "library": "virtualenv", + "name": "read", + "source_code": "def read(self):\n return", + "docstring": "Nothing to read.", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\app_data\\na.py", + "ast_data": "FunctionDef name:read arg:self arguments arg Return return:no" + }, + { + "library": "tensorflow", + "name": "benchmark_generation_time", + "source_code": "def benchmark_generation_time(output_token_len):\n timestamp_start = datetime.datetime.now()\n reply = sampler.chat(prompt, max_new_tokens=output_token_len)\n timestamp_end = datetime.datetime.now()\n timer_delta = timestamp_end - timestamp_start\n if output_token_len == OUTPUT_TOKEN_LEN:\n print(reply)\n return timer_delta.total_seconds() * 1000", + "docstring": "Benchmark generation time given output token length.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\flax_2b\\benchmark.py", + "ast_data": "FunctionDef name:benchmark_generation_time arg:output_token_len arguments arg Assign Call Assign Call Assign Call Assign If Compare Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, metrics=None, weighted_metrics=None, output_names=None, from_serialized=False):\n super(MetricsContainer, self).__init__(output_names=output_names)\n self._user_metrics = metrics\n self._user_weighted_metrics = weighted_metrics\n self._metrics = metrics\n self._weighted_metrics = weighted_metrics\n self._built = False\n self._from_serialized = from_serialized", + "docstring": "Initializes a container for metrics. 
Arguments: metrics: see the argument from . weighted_metrics: see the argument from . output_names: A list of strings of names of outputs for the model. from_serialized: Whether the model being compiled is from a serialized model. Used to avoid redundantly applying pre-processing renaming steps.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:metrics arg:weighted_metrics arg:output_names arg:from_serialized arguments arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign" + }, + { + "library": "numpy", + "name": "fit", + "source_code": "@classmethod\ndef fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, window=None, symbol='x'):\n if domain is None:\n domain = pu.getdomain(x)\n if domain[0] == domain[1]:\n domain[0] -= 1\n domain[1] += 1\n elif isinstance(domain, list) and len(domain) == 0:\n domain = cls.domain\n if window is None:\n window = cls.window\n xnew = pu.mapdomain(x, domain, window)\n res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)\n if full:\n [coef, status] = res\n return (cls(coef, domain=domain, window=window, symbol=symbol), status)\n else:\n coef = res\n return cls(coef, domain=domain, window=window, symbol=symbol)", + "docstring": "Least squares fit to data. Return a series instance that is the least squares fit to the data sampled at . The domain of the returned instance can be specified and this will often result in a superior fit with less chance of ill conditioning. Parameters ---------- x : array_like, shape (M,) x-coordinates of the M sample points `degdegxrcondlinalg.lstsq`.", + "type": "method", + "file_path": "numpy\\numpy\\polynomial\\_polybase.py", + "ast_data": "FunctionDef name:fit arg:cls arg:x arg:y arg:deg arg:domain arg:rcond arg:full arg:w arg:window arg:symbol arguments arg arg arg arg arg arg arg arg arg arg If Compare Assign Call If Compare If BoolOp Call Compare Call Assign If Compare Assign Assign Call Assign Call If Assign Return return:yes Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_AddNextAndBackEdge", + "source_code": "def _AddNextAndBackEdge(m, v, enforce_shape_invariant=True):\n if isinstance(m, tensor_lib.Tensor):\n v = ops.convert_to_tensor(v)\n v = _NextIteration(v)\n if enforce_shape_invariant:\n _EnforceShapeInvariant(m, v)\n m.op._update_input(1, v)\n elif isinstance(m, composite_tensor.CompositeTensor):\n\n def update_component(m_component, v_component):\n m_component.op._update_input(1, v_component)\n if isinstance(m, indexed_slices.IndexedSlices):\n v = math_ops._as_indexed_slices(v, optimize=False)\n v = _NextIteration(v)\n return nest.map_structure(update_component, m, v, expand_composites=True)\n else:\n raise TypeError(f\"'m' must be a Tensor or CompositeTensor. 
Received: {type(m)}.\")\n return v", + "docstring": "Add NextIteration and back edge from v to m.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:_AddNextAndBackEdge arg:m arg:v arg:enforce_shape_invariant arguments arg arg arg If Call Assign Call Assign Call If Call Call If Call FunctionDef name:update_component arg:m_component arg:v_component arguments arg arg Call If Call Assign Call Assign Call Return return:yes Call Raise Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "set_uuid", + "source_code": "def set_uuid(self, uuid: str) -> Styler:\n self.uuid = uuid\n return self", + "docstring": "Set the uuid applied to `id HTML elements. Styler.set_tooltips : Set the DataFrame of strings on ` is typically a more specific identifier, such as `idc1idTable visualization `_ for more examples.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style.py", + "ast_data": "FunctionDef name:set_uuid arg:self arg:uuid arguments arg arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_identity_broadcaster", + "source_code": "@classmethod\ndef get_identity_broadcaster(cls, nvals, dtype=None):\n return _GatherLayerBroadcaster(math_ops.range(nvals, dtype=dtype))", + "docstring": "Create an identity broadcaster. TODO(martinz): an identity broadcaster can be far more efficient than a generic broadcaster. Add an optimized implementation. Args: nvals: the number of values for the broadcaster. dtype: the dtype of the broadcaster, or None to use the dtype of nvals. Returns: an identity broadcaster from [0....nvals-1] to [0...nvals-1]", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:get_identity_broadcaster arg:cls arg:nvals arg:dtype arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_lookup_reduction", + "source_code": "def _lookup_reduction(self, t):\n assert isinstance(t, tensor_lib.Tensor), t\n return self._reduce_map.get(t.op)", + "docstring": "Lookups Tensor in the reduction maps.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:_lookup_reduction arg:self arg:t arguments arg arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "module", + "source_code": "@property\ndef module(self) -> nn.Module:\n if isinstance(self._fsdp_wrapped_module, ActivationWrapper):\n return getattr(self._fsdp_wrapped_module, _CHECKPOINT_WRAPPED_MODULE)\n return self._fsdp_wrapped_module", + "docstring": "Return the wrapped module.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:module arg:self arguments arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "pandas", + "name": "read_sas", + "source_code": "@doc(decompression_options=_shared_docs['decompression_options'] % 'filepath_or_buffer')\ndef read_sas(filepath_or_buffer: FilePath | ReadBuffer[bytes], *, format: str | None=None, index: Hashable | None=None, encoding: str | None=None, chunksize: int | None=None, iterator: bool=False, compression: CompressionOptions='infer') -> DataFrame | SASReader:\n if format is None:\n buffer_error_msg = 'If this is a buffer object rather than a string name, you must specify a format string'\n filepath_or_buffer = 
stringify_path(filepath_or_buffer)\n if not isinstance(filepath_or_buffer, str):\n raise ValueError(buffer_error_msg)\n fname = filepath_or_buffer.lower()\n if '.xpt' in fname:\n format = 'xport'\n elif '.sas7bdat' in fname:\n format = 'sas7bdat'\n else:\n raise ValueError(f'unable to infer format of SAS file from filename: {fname!r}')\n reader: SASReader\n if format.lower() == 'xport':\n from pandas.io.sas.sas_xport import XportReader\n reader = XportReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize, compression=compression)\n elif format.lower() == 'sas7bdat':\n from pandas.io.sas.sas7bdat import SAS7BDATReader\n reader = SAS7BDATReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize, compression=compression)\n else:\n raise ValueError('unknown SAS format')\n if iterator or chunksize:\n return reader\n with reader:\n return reader.read()", + "docstring": "Read SAS files stored as either XPORT or SAS7BDAT format files. Parameters ---------- filepath_or_buffer : str, path object, or file-like object String, path object (implementing `chunksize` lines at a time, returns iterator. iterator : bool, defaults to False If True, returns an iterator for reading the file incrementally. {decompression_options} Returns ------- DataFrame, SAS7BDATReader, or XportReader DataFrame if iterator=False and chunksize=None, else SAS7BDATReader or XportReader, file format is inferred from file extension. See Also -------- read_csv : Read a comma-separated values (csv) file into a pandas DataFrame. read_excel : Read an Excel file into a pandas DataFrame. read_spss : Read an SPSS file into a pandas DataFrame. read_orc : Load an ORC object into a pandas DataFrame. read_feather : Load a feather-format object into a pandas DataFrame. Examples -------- >>> df = pd.read_sas(\"sas_data.sas7bdat\") # doctest: +SKIP", + "type": "function", + "file_path": "pandas\\pandas\\io\\sas\\sasreader.py", + "ast_data": "FunctionDef name:read_sas arg:filepath_or_buffer arguments arg arg arg arg arg arg arg If Compare Assign Assign Call If Call Raise Call Assign Call If Compare Assign If Compare Assign Raise Call If Compare Call Assign Call If Compare Call Assign Call Raise Call If BoolOp Return return:yes With Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "case", + "source_code": "@tf_export(v1=['case'])\n@dispatch.add_dispatch_support\ndef case(pred_fn_pairs, default=None, exclusive=False, strict=False, name='case'):\n return _case_helper(cond.cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=False, strict=strict)", + "docstring": "Create a case operation. See also . The parameter is a dict or list of pairs of size N. Each pair contains a boolean scalar tensor and a python callable that creates the tensors to be returned if the boolean evaluates to True. is a callable generating a list of tensors. All the callables in as well as (if provided) should return the same number and types of tensors. If , all predicates are evaluated, and an exception is thrown if more than one of the predicates evaluates to . If , execution stops at the first predicate which evaluates to True, and the tensors generated by the corresponding function are returned immediately. If none of the predicates evaluate to True, this operation returns the tensors generated by . supports nested structures as implemented in . All of the callables must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. 
Singleton lists and tuples form the only exceptions to this: when returned by a callable, they are implicitly unpacked to single values. This behavior is disabled by passing . If an unordered dictionary is used for , the order of the conditional tests is not guaranteed. However, the order is guaranteed to be deterministic, so that variables created in conditional branches are created in fixed order across runs. @compatibility(eager) Unordered dictionaries are not supported in eager mode when . Use a list of tuples instead. @end_compatibility **Example 1:** Pseudocode: Expressions: Args: pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to . strict: A boolean that enables/disables 'strict' mode; see above. name: A name for this operation (optional). Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by if none does. Raises: TypeError: If is not a list/dictionary. TypeError: If is a list but does not contain 2-tuples. TypeError: If is not callable for any i, or is not callable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_case.py", + "ast_data": "FunctionDef name:case arg:pred_fn_pairs arg:default arg:exclusive arg:strict arg:name arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "model_fields", + "source_code": "@cached_property\ndef model_fields(self):\n converter = connections[self.db].introspection.identifier_converter\n return {converter(field.column): field for field in self.model._meta.fields if field.column}", + "docstring": "A dict mapping column names to model field names.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:model_fields arg:self arguments arg Assign Return return:yes Call" + }, + { + "library": "sphinx", + "name": "find_tags", + "source_code": "def find_tags(self) -> dict[str, tuple[str, int, int]]:\n self.analyze()\n return self.tags", + "docstring": "Find class, function and method definitions and their location.", + "type": "method", + "file_path": "sphinx\\sphinx\\pycode\\__init__.py", + "ast_data": "FunctionDef name:find_tags arg:self arguments arg Call Return return:yes" + }, + { + "library": "seaborn", + "name": "set_style", + "source_code": "def set_style(style=None, rc=None):\n style_object = axes_style(style, rc)\n mpl.rcParams.update(style_object)", + "docstring": "Set the parameters that control the general style of the plots. The style parameters control properties like the color of the background and whether a grid is enabled by default. This is accomplished using the matplotlib rcParams system. The options are illustrated in the :doc:. See :func: to get the parameter values. Parameters ---------- style : dict, or one of {darkgrid, whitegrid, dark, white, ticks} A dictionary of parameters or the name of a preconfigured style. rc : dict, optional Parameter mappings to override the values in the preset seaborn style dictionaries. This only updates parameters that are considered part of the style definition. Examples -------- .. 
include:: ../docstrings/set_style.rst", + "type": "function", + "file_path": "seaborn\\seaborn\\rcmod.py", + "ast_data": "FunctionDef name:set_style arg:style arg:rc arguments arg arg Assign Call Call" + }, + { + "library": "pytorch", + "name": "_pid_namespace_link", + "source_code": "def _pid_namespace_link(pid: Optional[int]=None) -> str:\n PID_NAMESPACE_PATH = '/proc/{}/ns/pid'\n pid = pid or os.getpid()\n return os.readlink(PID_NAMESPACE_PATH.format(pid))", + "docstring": "Returns the link to the process's namespace, example: pid:[4026531836]", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_strobelight\\cli_function_profiler.py", + "ast_data": "FunctionDef name:_pid_namespace_link arg:pid arguments arg Assign Assign BoolOp Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "CommonEndpoints", + "source_code": "class CommonEndpoints(SerializedAttributes.with_attributes('CommonEndpoints', checkpointable_objects=['variables', 'trainable_variables', 'regularization_losses'], functions=['__call__', 'call_and_return_all_conditional_losses', '_default_save_signature'])):\n pass", + "docstring": "Common endpoints shared by all models loadable by Keras. List of all attributes: variables: List of all variables in the model and its sublayers. trainable_variables: List of all trainable variables in the model and its sublayers. regularization_losses: List of all unconditional losses (losses not dependent on the inputs) in the model and its sublayers. __call__: Function that takes inputs and returns the outputs of the model call function. call_and_return_all_conditional_losses: Function that returns a tuple of (call function outputs, list of all losses that depend on the inputs). _default_save_signature: Traced model call function. This is only included if the top level exported object is a Keras model.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py", + "ast_data": "ClassDef name:CommonEndpoints Call" + }, + { + "library": "pytorch", + "name": "autotune_hints_to_configs", + "source_code": "def autotune_hints_to_configs(hints: OrderedSet[AutotuneHint], size_hints, block_size: int, device_props: DeviceProperties) -> list[Config]:\n xyz_options: tuple[tuple[int, Optional[int], Optional[int]], ...]\n configs: list[Config] = []\n for hint in hints:\n if hint == AutotuneHint.ONE_ELEMENT_PER_THREAD:\n if len(size_hints) == 1:\n xyz_options = ((block_size // 4, None, None),)\n elif len(size_hints) == 2:\n xyz_options = ((block_size // 4, 1, None), (1, block_size // 4, None))\n elif len(size_hints) == 3:\n xyz_options = ((block_size // 4, 1, 1), (1, block_size // 4, 1), (1, 1, block_size // 4))\n configs.extend((triton_config(size_hints, *xyz, num_elements_per_warp=device_props.warp_size if device_props.warp_size else 32) for xyz in xyz_options))\n return configs", + "docstring": "AutotuneHints can be attached to the metadata of triton kernels for providing suggestions about what to try for autotuning. One reason to do this is if there are some configs that are only useful in specific scenarios, in which case we can avoid wasting compile time on autotuning unless we know we are in one of those scenarios. 
Based on those hints, this function will generate a list of additional autotuning configs to try.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py", + "ast_data": "FunctionDef name:autotune_hints_to_configs arg:hints arg:size_hints arg:block_size arg:device_props arguments arg arg arg arg For If Compare If Compare Call Assign If Compare Call Assign If Compare Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "clear", + "source_code": "def clear(self):\n for fname in self._list_cache_files():\n self._delete(fname)", + "docstring": "Remove all the cache files.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\filebased.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg For Call Call" + }, + { + "library": "scikit-learn", + "name": "_process_decision_function", + "source_code": "def _process_decision_function(*, y_pred, target_type, classes, pos_label):\n if target_type == 'binary' and pos_label == classes[0]:\n return -1 * y_pred\n return y_pred", + "docstring": "Get the response values when the response method is . This function process the array in the binary and multi-label cases. In the binary case, it inverts the sign of the score if the positive label is not . In the multi-label case, it stacks the predictions if they are not in the \"compressed\" format . Parameters ---------- y_pred : ndarray Output of . The shape depends on the target type: - for binary classification, it is a 1d array of shape where the sign is assuming that is the positive class; - for multiclass classification, it is a 2d array of shape ; - for multilabel classification, it is a 2d array of shape . target_type : {\"binary\", \"multiclass\", \"multilabel-indicator\"} Type of the target. classes : ndarray of shape (n_classes,) or list of such arrays Class labels as reported by . pos_label : int, float, bool or str Only used with binary and multiclass targets. Returns ------- y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or (n_samples, n_output) Compressed predictions format as requested by the metrics.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_response.py", + "ast_data": "FunctionDef name:_process_decision_function arguments arg arg arg arg If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "inferred_steps", + "source_code": "@property\ndef inferred_steps(self):\n return self._inferred_steps", + "docstring": "The inferred steps per epoch of the created . This will be in the case where: (1) A of unknown cardinality was passed to the , and (2) was not provided, and (3) The first epoch of iteration has not yet completed. Returns: The inferred steps per epoch of the created .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:inferred_steps arg:self arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "union1d", + "source_code": "@array_function_dispatch(_union1d_dispatcher)\ndef union1d(ar1, ar2):\n return unique(np.concatenate((ar1, ar2), axis=None))", + "docstring": "Find the union of two arrays. Return the unique, sorted array of values that are in either of the two input arrays. Parameters ---------- ar1, ar2 : array_like Input arrays. They are flattened if they are not already 1D. Returns ------- union1d : ndarray Unique, sorted union of the input arrays. 
Examples -------- >>> import numpy as np >>> np.union1d([-1, 0, 1], [-2, 0, 2]) array([-2, -1, 0, 1, 2]) To find the union of more than two arrays, use functools.reduce: >>> from functools import reduce >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) array([1, 2, 3, 4, 6])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py", + "ast_data": "FunctionDef name:union1d arg:ar1 arg:ar2 arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "post_compile", + "source_code": "def post_compile(self, compiled_fn, aot_config, *, runtime_metadata) -> Callable:\n return compiled_fn", + "docstring": "Given an output of the compiler, wrap it with information received from prologue. Args: compiled_fn: Callable after calling compiler_fn aot_config: AOTConfig after calling prologue runtime_metadata: ViewAndMutationMeta after calling all wrappers's pre_compile steps. Example: def wrapped_compiled_fn(args): # do something with args, aot_config, fw_metadata return compiled_fn(args) return wrapped_compiled_fn", + "type": "method", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py", + "ast_data": "FunctionDef name:post_compile arg:self arg:compiled_fn arg:aot_config arguments arg arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "write_object_proto", + "source_code": "def write_object_proto(var, proto, options):\n if options.experimental_variable_policy._expand_distributed_variables():\n for var in var.values:\n var_proto = proto.variable.experimental_distributed_variable_components.add()\n var_proto.name = var.name.split(':')[0]\n var_proto.device = var.device", + "docstring": "Update a SavedObject proto for the caller. If a DistributedVariable object supports this method, it will be called when saving with a pre-built proto representing the object, plus an instance of . This method is then free to modify that proto instance. with or synchronization optionally write out information about their components to the field of a (depending on the variable policy). Args: var: The DistributedVariable object. proto: A pre-built proto for this object. It is assumed this will be a instance. options: A instance.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py", + "ast_data": "FunctionDef name:write_object_proto arg:var arg:proto arg:options arguments arg arg arg If Call For Assign Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "_update_snapshot", + "source_code": "def _update_snapshot(self):\n self._attribute_sentinel.invalidate_all()\n if self._external_modification or self._non_append_mutation:\n return\n self._last_wrapped_list_snapshot = list(self._storage)", + "docstring": "Acknowledges tracked changes to the wrapped list.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py", + "ast_data": "FunctionDef name:_update_snapshot arg:self arguments arg Call If BoolOp Return return:no Assign Call" + }, + { + "library": "numpy", + "name": "feature_sorted", + "source_code": "def feature_sorted(self, names, reverse=False):\n\n def sort_cb(k):\n if isinstance(k, str):\n return self.feature_supported[k]['interest']\n rank = max([self.feature_supported[f]['interest'] for f in k])\n rank += len(k) - 1\n return rank\n return sorted(names, reverse=reverse, key=sort_cb)", + "docstring": "Sort a list of CPU features ordered by the lowest interest. 
Parameters ---------- 'names': sequence sequence of supported feature names in uppercase. 'reverse': bool, optional If true, the sorted features is reversed. (highest interest) Returns ------- list, sorted CPU features", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", + "ast_data": "FunctionDef name:feature_sorted arg:self arg:names arg:reverse arguments arg arg arg FunctionDef name:sort_cb arg:k arguments arg If Call Return return:yes Assign Call Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_scalar", + "source_code": "def _scalar(tf_fn, x, promote_to_float=False):\n x = np_array_ops.asarray(x)\n if promote_to_float and (not np.issubdtype(x.dtype.as_numpy_dtype, np.inexact)):\n x = x.astype(np_utils.result_type(float))\n return tf_fn(x)", + "docstring": "Computes the tf_fn(x) for each element in . Args: tf_fn: function that takes a single Tensor argument. x: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using . promote_to_float: whether to cast the argument to a float dtype if it is not already. Returns: An ndarray with the same shape as . The default output dtype is determined by , unless x is an ndarray with a floating point type, in which case the output type is same as x.dtype.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py", + "ast_data": "FunctionDef name:_scalar arg:tf_fn arg:x arg:promote_to_float arguments arg arg arg Assign Call If BoolOp Call Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_load_coverage", + "source_code": "def _load_coverage(F, header_length=6, dtype=np.int16):\n header = [F.readline() for _ in range(header_length)]\n make_tuple = lambda t: (t.split()[0], float(t.split()[1]))\n header = dict([make_tuple(line) for line in header])\n M = np.loadtxt(F, dtype=dtype)\n nodata = int(header[b'NODATA_value'])\n if nodata != -9999:\n M[nodata] = -9999\n return M", + "docstring": "Load a coverage file from an open file object. This will return a numpy array of the given dtype", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_species_distributions.py", + "ast_data": "FunctionDef name:_load_coverage arg:F arg:header_length arg:dtype arguments arg arg arg Assign Call Call Assign arguments arg Call Call Call Assign Call Call Assign Call Assign Call If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "count_params", + "source_code": "def count_params(weights):\n unique_weights = {id(w): w for w in weights}.values()\n weight_shapes = [w.shape.as_list() for w in unique_weights]\n standardized_weight_shapes = [[0 if w_i is None else w_i for w_i in w] for w in weight_shapes]\n return int(sum((np.prod(p) for p in standardized_weight_shapes)))", + "docstring": "Count the total number of scalars composing the weights. 
Args: weights: An iterable containing the weights on which to compute params Returns: The total number of scalars composing the weights", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\layer_utils.py", + "ast_data": "FunctionDef name:count_params arg:weights arguments arg Assign Call Call Assign Call Assign Compare Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "RendezvousStateError", + "source_code": "class RendezvousStateError(RendezvousError):\n pass", + "docstring": "Raised when the state of a rendezvous is corrupt.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", + "ast_data": "ClassDef name:RendezvousStateError" + }, + { + "library": "scipy", + "name": "logpdf", + "source_code": "def logpdf(self, x, df, scale):\n dim, df, scale = self._process_parameters(df, scale)\n x = self._process_quantiles(x, dim)\n C, log_det_scale = self._cholesky_logdet(scale)\n out = self._logpdf(x, dim, df, log_det_scale, C)\n return _squeeze_output(out)", + "docstring": "Log of the inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at Notes ----- %(_doc_callparams_note)s", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:logpdf arg:self arg:x arg:df arg:scale arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "max_pool1d", + "source_code": "def max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):\n if return_indices:\n raise NotImplementedError('return_indices is not yet implemented!')\n if stride is None:\n stride = torch.jit.annotate(list[int], [])\n return torch.nn.functional.max_pool1d(input, kernel_size, stride, padding, dilation, ceil_mode=ceil_mode, return_indices=return_indices)", + "docstring": "Applies a 1D max pooling over a quantized input signal composed of several quantized input planes. .. note:: The input quantization parameters are propagated to the output. See :class: for details.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", + "ast_data": "FunctionDef name:max_pool1d arg:input arg:kernel_size arg:stride arg:padding arg:dilation arg:ceil_mode arg:return_indices arguments arg arg arg arg arg arg arg If Raise Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "allocator", + "source_code": "@property\ndef allocator(self) -> str:\n return self._allocator", + "docstring": "Name of the allocator used to create this tensor (string).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", + "ast_data": "FunctionDef name:allocator arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_constrained_layout", + "source_code": "def get_constrained_layout(self):\n return isinstance(self.get_layout_engine(), ConstrainedLayoutEngine)", + "docstring": "Return whether constrained layout is being used. 
See :ref:.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:get_constrained_layout arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_onenormest_matrix_power", + "source_code": "def _onenormest_matrix_power(A, p, t=2, itmax=5, compute_v=False, compute_w=False):\n from scipy.sparse.linalg._onenormest import onenormest\n return onenormest(aslinearoperator(A) ** p)", + "docstring": "Efficiently estimate the 1-norm of A^p. Parameters ---------- A : ndarray Matrix whose 1-norm of a power is to be computed. p : int Non-negative integer power. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py", + "ast_data": "FunctionDef name:_onenormest_matrix_power arg:A arg:p arg:t arg:itmax arg:compute_v arg:compute_w arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "evaluate_signature", + "source_code": "def evaluate_signature(sig: Signature, globalns: dict[str, Any] | None=None, localns: dict[str, Any] | None=None) -> Signature:\n if globalns is None:\n globalns = {}\n if localns is None:\n localns = globalns\n parameters = list(sig.parameters.values())\n for i, param in enumerate(parameters):\n if param.annotation:\n annotation = _evaluate(param.annotation, globalns, localns)\n parameters[i] = param.replace(annotation=annotation)\n return_annotation = sig.return_annotation\n if return_annotation:\n return_annotation = _evaluate(return_annotation, globalns, localns)\n return sig.replace(parameters=parameters, return_annotation=return_annotation)", + "docstring": "Evaluate unresolved type annotations in a signature object.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:evaluate_signature arg:sig arg:globalns arg:localns arguments arg arg arg If Compare Assign If Compare Assign Assign Call Call For Call If Assign Call Assign Call Assign If Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_SparseTensorToCSRSparseMatrixGrad", + "source_code": "@ops.RegisterGradient('SparseTensorToCSRSparseMatrix')\ndef _SparseTensorToCSRSparseMatrixGrad(op: ops.Operation, grad):\n grad_values = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(grad, type=op.get_attr('T')).values\n return (None, grad_values, None)", + "docstring": "Gradient for sparse_tensor_to_csr_sparse_matrix op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_grad.py", + "ast_data": "FunctionDef name:_SparseTensorToCSRSparseMatrixGrad arg:op arg:grad 
arguments arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_clean_event", + "source_code": "def _clean_event(self, event):\n if event.xdata is None:\n event = self._prev_event\n else:\n event = copy.copy(event)\n event.xdata, event.ydata = self._get_data(event)\n self._prev_event = event\n return event", + "docstring": "Preprocess an event: - Replace *event* by the previous event if *event* has no `` from this widget's Axes, and clip them to the axes limits. - Update the previous event.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:_clean_event arg:self arg:event arguments arg arg If Compare Assign Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "AutoNumbering", + "source_code": "class AutoNumbering(SphinxTransform):\n default_priority = 210\n\n def apply(self, **kwargs: Any) -> None:\n domain: StandardDomain = self.env.domains.standard_domain\n for node in self.document.findall(nodes.Element):\n if domain.is_enumerable_node(node) and domain.get_numfig_title(node) is not None and (node['ids'] == []):\n self.document.note_implicit_target(node)", + "docstring": "Register IDs of tables, figures and literal_blocks to assign numbers.", + "type": "class", + "file_path": "sphinx\\sphinx\\transforms\\__init__.py", + "ast_data": "ClassDef name:AutoNumbering Assign FunctionDef name:apply arg:self arguments arg arg For Call If BoolOp Call Compare Call Compare Call" + }, + { + "library": "tensorflow", + "name": "get_random_numeric_tensor", + "source_code": "def get_random_numeric_tensor(self, dtype=None, min_size=_MIN_SIZE, max_size=_MAX_SIZE, min_val=_MIN_INT, max_val=_MAX_INT):\n if max_size > 8:\n raise tf.errors.InvalidArgumentError(None, None, 'Given size of {} will result in an OOM error'.format(max_size))\n seed = self.get_int()\n shape = self.get_int_list(min_length=min_size, max_length=max_size, min_int=min_size, max_int=max_size)\n if dtype is None:\n dtype = self.get_tf_dtype(allowed_set=_TF_RANDOM_DTYPES)\n elif dtype not in _TF_RANDOM_DTYPES:\n raise tf.errors.InvalidArgumentError(None, None, 'Given dtype {} is not accepted in get_random_numeric_tensor'.format(dtype))\n return tf.random.uniform(shape=shape, minval=min_val, maxval=max_val, dtype=dtype, seed=seed)", + "docstring": "Return a tensor of random shape and values. Generated tensors are capped at dimension sizes of 8, as 2^32 bytes of requested memory crashes the fuzzer (see b/34190148). Returns only type that tf.random.uniform can generate. If you need a different type, consider using tf.cast. 
Args: dtype: Type of tensor, must of one of the following types: float16, float32, float64, int32, or int64 min_size: Minimum size of returned tensor max_size: Maximum size of returned tensor min_val: Minimum value in returned tensor max_val: Maximum value in returned tensor Returns: Tensor of random shape filled with uniformly random numeric values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py", + "ast_data": "FunctionDef name:get_random_numeric_tensor arg:self arg:dtype arg:min_size arg:max_size arg:min_val arg:max_val arguments arg arg arg arg arg arg If Compare Raise Call Call Assign Call Assign Call If Compare Assign Call If Compare Raise Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "bgr_to_rgba", + "source_code": "def bgr_to_rgba(image: Tensor, alpha_val: Union[float, Tensor]) -> Tensor:\n if not isinstance(image, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f'Input size must have a shape of (*, 3, H, W).Got {image.shape}')\n if not isinstance(alpha_val, (float, Tensor)):\n raise TypeError(f'alpha_val type is not a float or Tensor. Got {type(alpha_val)}')\n x_rgb: Tensor = bgr_to_rgb(image)\n return rgb_to_rgba(x_rgb, alpha_val)", + "docstring": "Convert an image from BGR to RGBA. Args: image: BGR Image to be converted to RGBA of shape :math:. alpha_val: A float number for the alpha value or a tensor of shape :math:. Returns: RGBA version of the image with shape :math:. .. note:: The current functionality is NOT supported by Torchscript. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = bgr_to_rgba(input, 1.) # 2x4x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\rgb.py", + "ast_data": "FunctionDef name:bgr_to_rgba arg:image arg:alpha_val arguments arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If Call Raise Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "inner_sharded", + "source_code": "@classmethod\ndef inner_sharded(cls, mesh: Mesh, inner_dim: str, rank: int) -> 'Layout':\n return cls.batch_sharded(mesh, inner_dim, rank, axis=rank - 1)", + "docstring": "Returns a layout sharded on inner dimension.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:inner_sharded arg:cls arg:mesh arg:inner_dim arg:rank arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_xaxis_text2_transform", + "source_code": "def get_xaxis_text2_transform(self, pad_points):\n labels_align = mpl.rcParams['xtick.alignment']\n return (self.get_xaxis_transform(which='tick2') + mtransforms.ScaledTranslation(0, pad_points / 72, self.get_figure(root=False).dpi_scale_trans), 'bottom', labels_align)", + "docstring": "Returns ------- transform : Transform The transform used for drawing secondary x-axis labels, which will add *pad_points* of padding (in points) between the axis and the label. The x-direction is in data coordinates and the y-direction is in axis coordinates valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'} The text vertical alignment. halign : {'center', 'left', 'right'} The text horizontal alignment. 
Notes ----- This transformation is primarily used by the class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:get_xaxis_text2_transform arg:self arg:pad_points arguments arg arg Assign Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_forward", + "source_code": "def _forward(self, x):\n raise NotImplementedError('forward not implemented.')", + "docstring": "Subclass implementation for public function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", + "ast_data": "FunctionDef name:_forward arg:self arg:x arguments arg arg Raise Call" + }, + { + "library": "scipy", + "name": "_smart_matrix_product", + "source_code": "def _smart_matrix_product(A, B, alpha=None, structure=None):\n if len(A.shape) != 2:\n raise ValueError('expected A to be a rectangular matrix')\n if len(B.shape) != 2:\n raise ValueError('expected B to be a rectangular matrix')\n f = None\n if structure == UPPER_TRIANGULAR:\n if not issparse(A) and (not issparse(B)) and (not is_pydata_spmatrix(A)) and (not is_pydata_spmatrix(B)):\n f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))\n if f is not None:\n if alpha is None:\n alpha = 1.0\n out = f(alpha, A, B)\n elif alpha is None:\n out = A.dot(B)\n else:\n out = alpha * A.dot(B)\n return out", + "docstring": "A matrix product that knows about sparse and structured matrices. Parameters ---------- A : 2d ndarray First matrix. B : 2d ndarray Second matrix. alpha : float The matrix product will be scaled by this constant. structure : str, optional A string describing the structure of both matrices and . Only is currently supported. Returns ------- M : 2d ndarray Matrix product of A and B.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py", + "ast_data": "FunctionDef name:_smart_matrix_product arg:A arg:B arg:alpha arg:structure arguments arg arg arg arg If Compare Call Raise Call If Compare Call Raise Call Assign If Compare If BoolOp Call Call Call Call Assign Call If Compare If Compare Assign Assign Call If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "Maximum", + "source_code": "class Maximum(_Merge):\n\n def _merge_function(self, inputs):\n output = inputs[0]\n for i in range(1, len(inputs)):\n output = math_ops.maximum(output, inputs[i])\n return output", + "docstring": "Layer that computes the maximum (element-wise) a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). >>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1), ... 
np.arange(5, 10).reshape(5, 1)]) >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> maxed = tf.keras.layers.Maximum()([x1, x2]) >>> maxed.shape TensorShape([5, 8])", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py", + "ast_data": "ClassDef name:Maximum FunctionDef name:_merge_function arg:self arg:inputs arguments arg arg Assign For Call Call Assign Call Return return:yes" + }, + { + "library": "virtualenv", + "name": "ExePathRef", + "source_code": "class ExePathRef(PathRef, ABC):\n\n def __init__(self, src, must=RefMust.NA, when=RefWhen.ANY) -> None:\n super().__init__(src, must, when)\n self._can_run = None\n\n @property\n def can_symlink(self):\n if self.FS_SUPPORTS_SYMLINK:\n return self.can_run\n return False\n\n @property\n def can_run(self):\n if self._can_run is None:\n mode = self.src.stat().st_mode\n for key in [S_IXUSR, S_IXGRP, S_IXOTH]:\n if mode & key:\n self._can_run = True\n break\n else:\n self._can_run = False\n return self._can_run", + "docstring": "Base class that checks if a executable can be references via symlink/copy.", + "type": "class", + "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\ref.py", + "ast_data": "ClassDef name:ExePathRef FunctionDef name:__init__ arg:self arg:src arg:must arg:when arguments arg arg arg arg Call Call Assign FunctionDef name:can_symlink arg:self arguments arg If Return return:yes Return return:yes FunctionDef name:can_run arg:self arguments arg If Compare Assign Call For If Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "addcmul", + "source_code": "@register_decomposition(aten.addcmul)\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('self', 'tensor1', 'tensor2'), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef addcmul(self: TensorLikeType, tensor1: TensorLikeType, tensor2: TensorLikeType, *, value: NumberType=1) -> TensorLikeType:\n if value is not None:\n dtype = self.dtype\n python_type = utils.dtype_to_type(dtype)\n torch._check_value(utils.is_weakly_lesser_type(type(value), python_type), lambda: f'value argument of type {type(value)} cannot be safely cast to type {python_type}!')\n return self + value * tensor1 * tensor2", + "docstring": "Reference implementation of torch.addcmul", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\__init__.py", + "ast_data": "FunctionDef name:addcmul arg:self arg:tensor1 arg:tensor2 arguments arg arg arg arg If Compare Assign Assign Call Call Call Call arguments Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "should_trigger_for_step", + "source_code": "def should_trigger_for_step(self, step):\n if self._last_triggered_step is None:\n return True\n if self._last_triggered_step == step:\n return False\n if self._every_secs is not None:\n if time.time() >= self._last_triggered_time + self._every_secs:\n return True\n if self._every_steps is not None:\n if step >= self._last_triggered_step + self._every_steps:\n return True\n return False", + "docstring": "Return true if the timer should trigger for the specified step. Args: step: Training step to trigger on. Returns: True if the difference between the current time and the time of the last trigger exceeds , or if the difference between the current step and the last triggered step exceeds . 
False otherwise.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", + "ast_data": "FunctionDef name:should_trigger_for_step arg:self arg:step arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare If Compare Call Return return:yes If Compare If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_detect_is_causal_mask", + "source_code": "def _detect_is_causal_mask(mask: Optional[Tensor], is_causal: Optional[bool]=None, size: Optional[int]=None) -> bool:\n make_causal = is_causal is True\n if is_causal is None and mask is not None:\n sz = size if size is not None else mask.size(-2)\n causal_comparison = _generate_square_subsequent_mask(sz, device=mask.device, dtype=mask.dtype)\n if mask.size() == causal_comparison.size():\n make_causal = bool((mask == causal_comparison).all())\n else:\n make_causal = False\n return make_causal", + "docstring": "Return whether the given attention mask is causal. Warning: If `` if not None, check whether the mask is a causal mask of the provided size Otherwise, checks for any causal mask.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\modules\\transformer.py", + "ast_data": "FunctionDef name:_detect_is_causal_mask arg:mask arg:is_causal arg:size arguments arg arg arg Assign Compare If BoolOp Compare Compare Assign Compare Call Assign Call If Compare Call Call Assign Call Call Compare Assign Return return:yes" + }, + { + "library": "django", + "name": "_checkdim", + "source_code": "def _checkdim(self, dim):\n if dim < 0 or dim > 2:\n raise GEOSException('invalid ordinate dimension \"%d\"' % dim)", + "docstring": "Check the given dimension.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py", + "ast_data": "FunctionDef name:_checkdim arg:self arg:dim arguments arg arg If BoolOp Compare Compare Raise Call" + }, + { + "library": "scipy", + "name": "ihfftn", + "source_code": "@_dispatch\ndef ihfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None):\n return (Dispatchable(x, np.ndarray),)", + "docstring": "Compute the N-D inverse discrete Fourier Transform for a real spectrum. This function computes the N-D inverse discrete Fourier Transform over any number of axes in an M-D real array by means of the Fast Fourier Transform (FFT). By default, all axes are transformed, with the real transform performed over the last axis, while the remaining transforms are complex. Parameters ---------- x : array_like Input array, taken to be real. s : sequence of ints, optional Shape (length along each transformed axis) to use from the input. (`saxessfftxfft~scipy.fft.fftaxessxssaxesaxesxihfftifftnrfft`. Examples -------- >>> import scipy.fft >>> import numpy as np >>> x = np.ones((2, 2, 2)) >>> scipy.fft.ihfftn(x) array([[[1.+0.j, 0.+0.j], # may vary [0.+0.j, 0.+0.j]], [[0.+0.j, 0.+0.j], [0.+0.j, 0.+0.j]]]) >>> scipy.fft.ihfftn(x, axes=(2, 0)) array([[[1.+0.j, 0.+0.j], # may vary [1.+0.j, 0.+0.j]], [[0.+0.j, 0.+0.j], [0.+0.j, 0.+0.j]]])", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_basic.py", + "ast_data": "FunctionDef name:ihfftn arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "remove", + "source_code": "def remove(self, keys, name=None):\n return self.erase(keys, name)", + "docstring": "Removes and its associated values from the table. 
If a key is not present in the table, it is silently ignored. Args: keys: Keys to remove. Can be a tensor of any shape. Must match the table's key type. name: A name for the operation (optional). Returns: The created Operation. Raises: TypeError: when do not match the table data types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:remove arg:self arg:keys arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "output_shapes", + "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_shapes(iterator)`.')\ndef output_shapes(self):\n return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self._element_spec)", + "docstring": "Returns the shape of each component of an element of this iterator. Returns: A (nested) structure of objects corresponding to each component of an element of this dataset.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", + "ast_data": "FunctionDef name:output_shapes arg:self arguments arg Return return:yes Call arguments arg Call Call" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "@available_if(_estimator_has('predict_proba', delegates=('final_estimator_', 'final_estimator')))\ndef predict_proba(self, X):\n check_is_fitted(self)\n y_pred = self.final_estimator_.predict_proba(self.transform(X))\n if isinstance(self._label_encoder, list):\n y_pred = np.array([preds[:, 0] for preds in y_pred]).T\n return y_pred", + "docstring": "Predict class probabilities for using the final estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- probabilities : ndarray of shape (n_samples, n_classes) or list of ndarray of shape (n_output,) The class probabilities of the input samples.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Call If Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "tree_flatten_with_path", + "source_code": "def tree_flatten_with_path(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> tuple[list[tuple[KeyPath, Any]], TreeSpec]:\n _, treespec = tree_flatten(tree, is_leaf)\n return (list(_generate_key_paths((), tree, is_leaf)), treespec)", + "docstring": "Flattens a pytree like :func:, but also returns each leaf's key path. Args: tree: a pytree to flatten. If it contains a custom type, that type must be registered with an appropriate when registered with :func:. is_leaf: An extra leaf predicate function that will be called at each flattening step. 
The function should have a single argument with signature `TrueTreeSpec` representing the structure of the flattened tree.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_pytree.py", + "ast_data": "FunctionDef name:tree_flatten_with_path arg:tree arg:is_leaf arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "conv3d", + "source_code": "def conv3d(input, weight, bias, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', scale=1.0, zero_point=0, dtype=torch.quint8):\n if padding_mode != 'zeros':\n raise NotImplementedError('Only zero-padding is supported!')\n if input.dtype != torch.quint8:\n raise NotImplementedError('Only torch.quint8 is supported for activation tensor!')\n if weight.dtype != torch.qint8:\n raise NotImplementedError('Only torch.qint8 is supported for weight tensor!')\n if input.ndim != 5:\n raise ValueError('Input shape must be `(N, C, D, H, W)`!')\n stride = _triple(stride)\n padding = _triple(padding)\n dilation = _triple(dilation)\n packed_params = torch.ops.quantized.conv3d_prepack(weight, bias, stride, padding, dilation, groups)\n return torch.ops.quantized.conv3d(input, packed_params, scale, zero_point)", + "docstring": "Applies a 3D convolution over a quantized 3D input composed of several input planes. See :class: for details and output shape. Args: input: quantized input tensor of shape :math: weight: quantized filters of shape :math: bias: **non-quantized** bias tensor of shape :math:. The tensor type must be . stride: the stride of the convolving kernel. Can be a single number or a tuple . Default: 1 padding: implicit paddings on both sides of the input. Can be a single number or a tuple . Default: 0 dilation: the spacing between kernel elements. Can be a single number or a tuple . Default: 1 groups: split input into groups, :math: should be divisible by the number of groups. Default: 1 padding_mode: the padding mode to use. Only \"zeros\" is supported for quantized convolution at the moment. Default: \"zeros\" scale: quantization scale for the output. Default: 1.0 zero_point: quantization zero_point for the output. Default: 0 dtype: quantization data type to use. 
Default: `` Examples:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE) >>> from torch.ao.nn.quantized import functional as qF >>> filters = torch.randn(8, 4, 3, 3, 3, dtype=torch.float) >>> inputs = torch.randn(1, 4, 5, 5, 5, dtype=torch.float) >>> bias = torch.randn(8, dtype=torch.float) >>> >>> scale, zero_point = 1.0, 0 >>> dtype_inputs = torch.quint8 >>> dtype_filters = torch.qint8 >>> >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters) >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs) >>> qF.conv3d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)", + "type": "function", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", + "ast_data": "FunctionDef name:conv3d arg:input arg:weight arg:bias arg:stride arg:padding arg:dilation arg:groups arg:padding_mode arg:scale arg:zero_point arg:dtype arguments arg arg arg arg arg arg arg arg arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call If Compare Raise Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "__call__", + "source_code": "def __call__(self, estimate_mode_type: str) -> Self:\n if estimate_mode_type == 'operator-level-benchmark':\n self._estimate_runtime = RuntimeEstimator._benchmark_estimate\n elif estimate_mode_type == 'operator-level-cost-model':\n self._estimate_runtime = RuntimeEstimator._roofline_estimate\n else:\n raise NotImplementedError(f'estimate_mode_type {estimate_mode_type} not supported')\n return self", + "docstring": "Sets the estimate mode type. Currently supported modes: - \"operator-level-benchmark\": Estimates runtime using operator benchmarking. - \"operator-level-cost-model\": Estimates runtime using roofline cost model. Args: estimate_mode_type (str): The type of estimate mode to use. Returns: SACEstimator: The SAC estimator instance. Raises: NotImplementedError: If the estimate mode type is not supported.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:estimate_mode_type arguments arg arg If Compare Assign If Compare Assign Raise Call Return return:yes" + }, + { + "library": "pandas", + "name": "_replace_locals", + "source_code": "def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]:\n toknum, tokval = tok\n if toknum == tokenize.OP and tokval == '@':\n return (tokenize.OP, LOCAL_TAG)\n return (toknum, tokval)", + "docstring": "Replace local variables with a syntactically valid name. 
Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- tuple of int, str Either the input or token or the replacement values Notes ----- This is somewhat of a hack in that we rewrite a string such as `` symbol with it.", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\expr.py", + "ast_data": "FunctionDef name:_replace_locals arg:tok arguments arg Assign If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "const_ext", + "source_code": "def const_ext(x, n, axis=-1):\n if n < 1:\n return x\n left_end = axis_slice(x, start=0, stop=1, axis=axis)\n ones_shape = [1] * x.ndim\n ones_shape[axis] = n\n ones = np.ones(ones_shape, dtype=x.dtype)\n left_ext = ones * left_end\n right_end = axis_slice(x, start=-1, axis=axis)\n right_ext = ones * right_end\n ext = np.concatenate((left_ext, x, right_ext), axis=axis)\n return ext", + "docstring": "Constant extension at the boundaries of an array Generate a new ndarray that is a constant extension of along an axis. The extension repeats the values at the first and last element of the axis. Parameters ---------- x : ndarray The array to be extended. n : int The number of elements by which to extend at each end of the axis. axis : int, optional The axis along which to extend . Default is -1. Examples -------- >>> import numpy as np >>> from scipy.signal._arraytools import const_ext >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) >>> const_ext(a, 2) array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5], [ 0, 0, 0, 1, 4, 9, 16, 16, 16]]) Constant extension continues with the same values as the endpoints of the array: >>> t = np.linspace(0, 1.5, 100) >>> a = 0.9 * np.sin(2 * np.pi * t**2) >>> b = const_ext(a, 40) >>> import matplotlib.pyplot as plt >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension') >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') >>> plt.legend(loc='best') >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_arraytools.py", + "ast_data": "FunctionDef name:const_ext arg:x arg:n arg:axis arguments arg arg arg If Compare Return return:yes Assign Call Assign Assign Assign Call Assign Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "is_type_factory", + "source_code": "def is_type_factory(_type: type[Any]) -> Callable[[Any], None]:\n\n def inner(x) -> None:\n if type(x) != _type:\n raise ValueError(f\"Value must have type '{_type}'\")\n return inner", + "docstring": "Parameters ---------- - a type to be compared against (e.g. 
type(x) == ) Returns ------- validator - a function of a single argument x , which raises ValueError if type(x) is not equal to", + "type": "function", + "file_path": "pandas\\pandas\\_config\\config.py", + "ast_data": "FunctionDef name:is_type_factory arg:_type arguments arg FunctionDef name:inner arg:x arguments arg If Compare Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "compute_specificity_at_sensitivity", + "source_code": "def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):\n sensitivities = math_ops.divide(tp, tp + fn + kepsilon)\n min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))\n indices_at_minval = math_ops.equal(math_ops.abs(sensitivities - sensitivity), min_val)\n indices_at_minval = math_ops.cast(indices_at_minval, dtypes.int64)\n indices_at_minval = math_ops.cumsum(indices_at_minval)\n tf_index = math_ops.argmax(indices_at_minval, 0)\n tf_index = math_ops.cast(tf_index, dtypes.int32)\n return math_ops.divide(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon, name)", + "docstring": "Computes the specificity at the given sensitivity. Args: tp: True positives. tn: True negatives. fp: False positives. fn: False negatives. name: The name of the operation. Returns: The specificity using the aggregated values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:compute_specificity_at_sensitivity arg:tp arg:tn arg:fp arg:fn arg:name arguments arg arg arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "getslots", + "source_code": "def getslots(obj: Any) -> dict[str, Any] | dict[str, None] | None:\n if not isclass(obj):\n raise TypeError\n __slots__ = safe_getattr(obj, '__slots__', None)\n if __slots__ is None:\n return None\n elif isinstance(__slots__, dict):\n return __slots__\n elif isinstance(__slots__, str):\n return {__slots__: None}\n elif isinstance(__slots__, list | tuple):\n return dict.fromkeys(__slots__)\n else:\n raise ValueError", + "docstring": "Safely get :term: as a dictionary if any. 
- This returns `TypeErrorValueError` is invalid.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:getslots arg:obj arguments arg If Call Raise Assign Call If Compare Return return:no If Call Return return:yes If Call Return return:yes If Call Return return:yes Call Raise" + }, + { + "library": "tensorflow", + "name": "UnsupportedLanguageElementError", + "source_code": "class UnsupportedLanguageElementError(PyCTError, NotImplementedError):\n pass", + "docstring": "Raised for code patterns that AutoGraph does not support.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\errors.py", + "ast_data": "ClassDef name:UnsupportedLanguageElementError" + }, + { + "library": "scikit-learn", + "name": "_get_updates", + "source_code": "def _get_updates(self, grads):\n updates = [self.momentum * velocity - self.learning_rate * grad for velocity, grad in zip(self.velocities, grads)]\n self.velocities = updates\n if self.nesterov:\n updates = [self.momentum * velocity - self.learning_rate * grad for velocity, grad in zip(self.velocities, grads)]\n return updates", + "docstring": "Get the values used to update params with given gradients Parameters ---------- grads : list, length = len(coefs_) + len(intercepts_) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params Returns ------- updates : list, length = len(grads) The values to add to params", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py", + "ast_data": "FunctionDef name:_get_updates arg:self arg:grads arguments arg arg Assign Call Assign If Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "synchronize_staging", + "source_code": "def synchronize_staging(self) -> None:\n pass", + "docstring": "In the case is async in some way, this method should be called to ensure staging is complete and it is safe to begin modifying the original", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\staging.py", + "ast_data": "FunctionDef name:synchronize_staging arg:self arguments arg" + }, + { + "library": "scipy", + "name": "points", + "source_code": "@property\ndef points(self):\n return []", + "docstring": "Any problematic points introduced by the transformation. 
These should be specified as points where ``.", + "type": "method", + "file_path": "scipy\\scipy\\integrate\\_cubature.py", + "ast_data": "FunctionDef name:points arg:self arguments arg Return return:no" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, name):\n self._name = name\n self._registry = {}", + "docstring": "Creates a new registry.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\registry.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Assign" + }, + { + "library": "tensorflow", + "name": "get_variable_scope_store", + "source_code": "def get_variable_scope_store():\n scope_store = ops.get_collection(_VARSCOPESTORE_KEY)\n if not scope_store:\n scope_store = _VariableScopeStore()\n ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)\n else:\n scope_store = scope_store[0]\n return scope_store", + "docstring": "Returns the variable scope store for current thread.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", + "ast_data": "FunctionDef name:get_variable_scope_store arguments Assign Call If Assign Call Call Assign Return return:yes" + }, + { + "library": "kornia", + "name": "Rgb255ToRgb", + "source_code": "class Rgb255ToRgb(Module):\n\n def forward(self, image: Tensor) -> Tensor:\n return rgb255_to_rgb(image)", + "docstring": "Convert an image from RGB [0, 255] to RGB for visualization purposes. Returns: RGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = Rgb255ToRgb() >>> output = rgb(input) # 2x3x4x5", + "type": "class", + "file_path": "kornia\\kornia\\color\\rgb.py", + "ast_data": "ClassDef name:Rgb255ToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_timestamped_export_dir", + "source_code": "def get_timestamped_export_dir(export_dir_base):\n attempts = 0\n while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:\n timestamp = int(time.time())\n result_dir = file_io.join(compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp)))\n if not gfile.Exists(result_dir):\n return result_dir\n time.sleep(1)\n attempts += 1\n logging.warn('Directory {} already exists; retrying (attempt {}/{})'.format(compat.as_str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))\n raise RuntimeError(f'Failed to obtain a unique export directory name after {MAX_DIRECTORY_CREATION_ATTEMPTS} attempts.')", + "docstring": "Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). 
Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py", + "ast_data": "FunctionDef name:get_timestamped_export_dir arg:export_dir_base arguments arg Assign While Compare Assign Call Call Assign Call Call Call Call If Call Return return:yes Call Call Call Call Raise Call" + }, + { + "library": "scipy", + "name": "Benchmark", + "source_code": "class Benchmark:\n pass", + "docstring": "Base class with sensible options", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\common.py", + "ast_data": "ClassDef name:Benchmark" + }, + { + "library": "django", + "name": "builtin_template_path", + "source_code": "def builtin_template_path(name):\n return Path(__file__).parent / 'templates' / name", + "docstring": "Return a path to a builtin template. Avoid calling this function at the module level or in a class-definition because __file__ may not exist, e.g. in frozen environments.", + "type": "function", + "file_path": "django\\django\\views\\csrf.py", + "ast_data": "FunctionDef name:builtin_template_path arg:name arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "Identity", + "source_code": "class Identity(Initializer):\n\n def __init__(self, gain=1.0):\n self.gain = gain\n\n def __call__(self, shape, dtype=None, **kwargs):\n _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n if len(shape) != 2:\n raise ValueError('Identity matrix initializer can only be used for 2D matrices.')\n initializer = linalg_ops.eye(*shape, dtype=dtype)\n return self.gain * initializer\n\n def get_config(self):\n return {'gain': self.gain}", + "docstring": "Initializer that generates the identity matrix. Also available via the shortcut function . Only usable for generating 2D matrices. 
Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Identity() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Identity() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: gain: Multiplicative factor to apply to the identity matrix.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "ClassDef name:Identity FunctionDef name:__init__ arg:self arg:gain arguments arg arg Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Call Raise Call Assign Call Return return:yes FunctionDef name:get_config arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "is_number", + "source_code": "def is_number(self):\n return False", + "docstring": "Is this a number token?", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py", + "ast_data": "FunctionDef name:is_number arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_record_variable_scope_and_name", + "source_code": "def _record_variable_scope_and_name(embedding_var_name, embedding_var_name_in_fc, is_shared_embedding=False, bypass_scope_validation=False):\n g = ops.get_default_graph()\n collection = g.get_collection_ref(_TPU_FC_TO_SCOPE)\n if not collection:\n collection.append({})\n var_def_dict = collection[0]\n captured_scope = variable_scope.get_variable_scope()\n captured_scope_name = captured_scope.name\n if embedding_var_name in var_def_dict:\n if var_def_dict[embedding_var_name][0] != captured_scope_name and (not is_shared_embedding) and (not bypass_scope_validation):\n raise ValueError('For embedding var name {}, the variable scope name is different, got {}; expected {}'.format(embedding_var_name, captured_scope_name, var_def_dict[embedding_var_name][0]))\n if var_def_dict[embedding_var_name][1] != embedding_var_name_in_fc:\n raise ValueError('For embedding var name {}, the embedding name is different, got {}; expected {}'.format(embedding_var_name, embedding_var_name_in_fc, var_def_dict[embedding_var_name][1]))\n else:\n var_def_dict[embedding_var_name] = (captured_scope_name, embedding_var_name_in_fc)", + "docstring": "Add embedding variable name and scope to collection.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py", + "ast_data": "FunctionDef name:_record_variable_scope_and_name arg:embedding_var_name arg:embedding_var_name_in_fc arg:is_shared_embedding arg:bypass_scope_validation arguments arg arg arg arg Assign Call Assign Call If Call Assign Assign Call Assign If Compare If BoolOp Compare Raise Call Call If Compare Raise Call Call Assign" + }, + { + "library": "matplotlib", + "name": "_interpolate_single_key", + "source_code": "def _interpolate_single_key(self, return_key, tri_index, x, y):\n raise NotImplementedError('TriInterpolator subclasses' + 'should implement _interpolate_single_key!')", + "docstring": "Interpolate at points belonging to the triangulation (inside an unmasked triangles). Parameters ---------- return_key : {'z', 'dzdx', 'dzdy'} The requested values (z or its derivatives). tri_index : 1D int array Valid triangle index (cannot be -1). x, y : 1D arrays, same shape as Valid locations where interpolation is requested. 
Returns ------- 1-d array Returned array of the same size as *tri_index*", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:_interpolate_single_key arg:self arg:return_key arg:tri_index arg:x arg:y arguments arg arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "args_to_mixed_eager_tensors", + "source_code": "def args_to_mixed_eager_tensors(lists, ctx):\n del ctx\n assert len(lists) > 1\n lists_ret = [[]]\n for l in lists[1:]:\n if len(l) != len(lists[0]):\n raise ValueError('Expected list arguments to be the same length: %d != %d (%r vs. %r).' % (len(lists[0]), len(l), lists[0], l))\n lists_ret.append([])\n types = []\n for i in range(len(lists[0])):\n dtype = None\n for l in lists:\n if isinstance(l[i], core_types.Value):\n dtype = l[i].dtype\n break\n if dtype is None:\n lists_ret[0].append(tensor_conversion_registry.convert(lists[0][i]))\n dtype = lists_ret[0][i].dtype\n for j in range(1, len(lists)):\n lists_ret[j].append(tensor_conversion_registry.convert(lists[j][i], dtype=dtype))\n else:\n for j in range(len(lists)):\n lists_ret[j].append(tensor_conversion_registry.convert(lists[j][i], dtype=dtype))\n types.append(dtype.as_datatype_enum)\n return (types, lists_ret)", + "docstring": "Converts a list of same-length lists of values to eager tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\execute.py", + "ast_data": "FunctionDef name:args_to_mixed_eager_tensors arg:lists arg:ctx arguments arg arg Compare Call Assign For If Compare Call Call Raise Call Call Call Call Assign For Call Call Assign For If Call Assign If Compare Call Call Assign For Call Call Call Call For Call Call Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "MergeError", + "source_code": "class MergeError(ValueError):\n pass", + "docstring": "Exception raised when merging data. Subclass of ``. See Also -------- DataFrame.join : For joining DataFrames on their indexes. merge : For merging two DataFrames on a common set of keys. Examples -------- >>> left = pd.DataFrame( ... {\"a\": [\"a\", \"b\", \"b\", \"d\"], \"b\": [\"cat\", \"dog\", \"weasel\", \"horse\"]}, ... index=range(4), ... ) >>> right = pd.DataFrame( ... {\"a\": [\"a\", \"b\", \"c\", \"d\"], \"c\": [\"meow\", \"bark\", \"chirp\", \"nay\"]}, ... index=range(4), ... ).set_index(\"a\") >>> left.join( ... right, ... on=\"a\", ... validate=\"one_to_one\", ... ) Traceback (most recent call last): MergeError: Merge keys are not unique in left dataset; not a one-to-one merge", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:MergeError" + }, + { + "library": "numpy", + "name": "DTypePromotionError", + "source_code": "class DTypePromotionError(TypeError):\n pass", + "docstring": "Multiple DTypes could not be converted to a common one. This exception derives from `arr1 == arr2object('field1', 'field2')('field1',)` mismatch.", + "type": "class", + "file_path": "numpy\\numpy\\exceptions.py", + "ast_data": "ClassDef name:DTypePromotionError" + }, + { + "library": "kornia", + "name": "rgb_to_grayscale", + "source_code": "def rgb_to_grayscale(image: Tensor, rgb_weights: Optional[Tensor]=None) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(image)\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f'Input size must have a shape of (*, 3, H, W). 
Got {image.shape}')\n if rgb_weights is None:\n if image.dtype == torch.uint8:\n rgb_weights = torch.tensor([76, 150, 29], device=image.device, dtype=torch.uint8)\n elif image.dtype in (torch.float16, torch.float32, torch.float64):\n rgb_weights = torch.tensor([0.299, 0.587, 0.114], device=image.device, dtype=image.dtype)\n else:\n raise TypeError(f'Unknown data type: {image.dtype}')\n else:\n rgb_weights = rgb_weights.to(image)\n r: Tensor = image[..., 0:1, :, :]\n g: Tensor = image[..., 1:2, :, :]\n b: Tensor = image[..., 2:3, :, :]\n w_r, w_g, w_b = rgb_weights.unbind()\n return w_r * r + w_g * g + w_b * b", + "docstring": "Convert a RGB image to grayscale version of image. .. image:: _static/img/rgb_to_grayscale.png The image data is assumed to be in the range of (0, 1). Args: image: RGB image to be converted to grayscale with shape :math:. rgb_weights: Weights that will be applied on each channel (RGB). The sum of the weights should add up to one. Returns: grayscale version of the image with shape :math:. .. note:: See a working example __. Example: >>> input = torch.rand(2, 3, 4, 5) >>> gray = rgb_to_grayscale(input) # 2x1x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\gray.py", + "ast_data": "FunctionDef name:rgb_to_grayscale arg:image arg:rgb_weights arguments arg arg Call If BoolOp Compare Call Compare Raise Call If Compare If Compare Assign Call If Compare Assign Call Raise Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "constant_value", + "source_code": "@tf_export(v1=['ragged.constant_value'])\n@dispatch.add_dispatch_support\ndef constant_value(pylist, dtype=None, ragged_rank=None, inner_shape=None, row_splits_dtype='int64') -> Union[ragged_tensor_value.RaggedTensorValue, np.ndarray]:\n if dtype is not None and isinstance(dtype, dtypes.DType):\n dtype = dtype.as_numpy_dtype\n row_splits_dtype = dtypes.as_dtype(row_splits_dtype).as_numpy_dtype\n\n def _ragged_factory(values, row_splits):\n row_splits = np.array(row_splits, dtype=row_splits_dtype)\n return ragged_tensor_value.RaggedTensorValue(values, row_splits)\n\n def _inner_factory(pylist, dtype, shape, name=None):\n if dtype is object or dtype is None:\n return np_reshape(np.array(pylist, dtype=dtype), shape)\n else:\n return np_reshape(np.array(pylist).astype(dtype), shape)\n return _constant_value(_ragged_factory, _inner_factory, pylist, dtype, ragged_rank, inner_shape)", + "docstring": "Constructs a RaggedTensorValue from a nested Python list. Warning: This function returns a , not a . If you wish to construct a constant , use instead. Example: >>> tf.compat.v1.ragged.constant_value([[1, 2], [3], [4, 5, 6]]) tf.RaggedTensorValue(values=array([1, 2, 3, 4, 5, 6]), row_splits=array([0, 2, 3, 6])) All scalar values in must have the same nesting depth , and the returned will have rank . If contains no scalar values, then is one greater than the maximum depth of empty lists in . All scalar values in must be compatible with . Args: pylist: A nested , or . Any nested element that is not a or must be a scalar value compatible with . dtype: . The type of elements for the returned . If not specified, then a default is chosen based on the scalar values in . ragged_rank: An integer specifying the ragged rank of the returned . Must be nonnegative and less than . Defaults to if is not specified. Defaults to if is specified. inner_shape: A tuple of integers specifying the shape for individual inner values in the returned . Defaults to if is not specified. 
If is specified, then a default is chosen based on the contents of . row_splits_dtype: data type for the constructed 's row_splits. One of or . Returns: A or with rank and the specified , containing the values from . Raises: ValueError: If the scalar values in have inconsistent nesting depth; or if ragged_rank or inner_shape are incompatible with .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_factory_ops.py", + "ast_data": "FunctionDef name:constant_value arg:pylist arg:dtype arg:ragged_rank arg:inner_shape arg:row_splits_dtype arguments arg arg arg arg arg If BoolOp Compare Call Assign Assign Call FunctionDef name:_ragged_factory arg:values arg:row_splits arguments arg arg Assign Call Return return:yes Call FunctionDef name:_inner_factory arg:pylist arg:dtype arg:shape arg:name arguments arg arg arg arg If BoolOp Compare Compare Return return:yes Call Call Return return:yes Call Call Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_maybe_real", + "source_code": "def _maybe_real(A, B, tol=None):\n if np.isrealobj(A) and np.iscomplexobj(B):\n if tol is None:\n tol = {0: feps * 1000.0, 1: eps * 1000000.0}[_array_precision[B.dtype.char]]\n if np.allclose(B.imag, 0.0, atol=tol):\n B = B.real\n return B", + "docstring": "Return either B or the real part of B, depending on properties of A and B. The motivation is that B has been computed as a complicated function of A, and B may be perturbed by negligible imaginary components. If A is real and B is complex with small imaginary components, then return a real copy of B. The assumption in that case would be that the imaginary components of B are numerical artifacts. Parameters ---------- A : ndarray Input array whose type is to be checked as real vs. complex. B : ndarray Array to be returned, possibly without its imaginary part. tol : float Absolute tolerance. Returns ------- out : real or complex array Either the input array B or only the real part of the input array B.", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_matfuncs.py", + "ast_data": "FunctionDef name:_maybe_real arg:A arg:B arg:tol arguments arg arg arg If BoolOp Call Call If Compare Assign If Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "as_text", + "source_code": "def as_text(bytes_or_text, encoding='utf-8'):\n encoding = codecs.lookup(encoding).name\n if isinstance(bytes_or_text, str):\n return bytes_or_text\n elif isinstance(bytes_or_text, bytes):\n return bytes_or_text.decode(encoding)\n else:\n raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)", + "docstring": "Converts any string-like python input types to unicode. Returns the input as a unicode string. Uses utf-8 encoding for text by default. Args: bytes_or_text: A , , or object. encoding: A string indicating the charset for decoding unicode. Returns: A (Python 2) or (Python 3) object. Raises: TypeError: If is not a binary or unicode string.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\compat.py", + "ast_data": "FunctionDef name:as_text arg:bytes_or_text arg:encoding arguments arg arg Assign Call If Call Return return:yes If Call Return return:yes Call Raise Call" + }, + { + "library": "kornia", + "name": "trans_y", + "source_code": "@classmethod\ndef trans_y(cls, y: Tensor) -> Se2:\n zs = zeros_like(y)\n return cls.trans(zs, y)", + "docstring": "Construct a y-axis translation. 
Args: y: the y-axis translation.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", + "ast_data": "FunctionDef name:trans_y arg:cls arg:y arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "backtick_repl", + "source_code": "def backtick_repl(matchobj):\n if matchobj.group(2) != ' ':\n post = '\\\\ ' + matchobj.group(2)\n else:\n post = matchobj.group(2)\n return '``' + matchobj.group(1) + '``' + post", + "docstring": "repl to add an escaped space following a code block if needed", + "type": "function", + "file_path": "numpy\\tools\\changelog.py", + "ast_data": "FunctionDef name:backtick_repl arg:matchobj arguments arg If Compare Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "check_funcs_once", + "source_code": "def check_funcs_once(self, funcs, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=False, call=False, call_args=None):\n self._check_compiler()\n body = []\n if decl:\n for f, v in decl.items():\n if v:\n body.append('int %s (void);' % f)\n body.append('#ifdef _MSC_VER')\n for func in funcs:\n body.append('#pragma function(%s)' % func)\n body.append('#endif')\n body.append('int main (void) {')\n if call:\n for f in funcs:\n if f in call and call[f]:\n if not (call_args and f in call_args and call_args[f]):\n args = ''\n else:\n args = call_args[f]\n body.append(' %s(%s);' % (f, args))\n else:\n body.append(' %s;' % f)\n else:\n for f in funcs:\n body.append(' %s;' % f)\n body.append(' return 0;')\n body.append('}')\n body = '\\n'.join(body) + '\\n'\n return self.try_link(body, headers, include_dirs, libraries, library_dirs)", + "docstring": "Check a list of functions at once. This is useful to speed up things, since all the functions in the funcs list will be put in one compilation unit. Arguments --------- funcs : seq list of functions to test include_dirs : seq list of header paths libraries : seq list of libraries to link the code snippet to library_dirs : seq list of library paths decl : dict for every (key, value), the declaration in the value will be used for function in key. If a function is not in the dictionary, no declaration will be used. 
call : dict for every item (f, value), if the value is True, a call will be done to the function f.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\command\\config.py", + "ast_data": "FunctionDef name:check_funcs_once arg:self arg:funcs arg:headers arg:include_dirs arg:libraries arg:library_dirs arg:decl arg:call arg:call_args arguments arg arg arg arg arg arg arg arg arg Call Assign If For Call If Call Call For Call Call Call If For If BoolOp Compare If BoolOp Compare Assign Assign Call Call For Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "create_init_files", + "source_code": "def create_init_files(dst_dir: str) -> None:\n for root, _, files in os.walk(dst_dir):\n if any((file.endswith('.py') or file.endswith('.so') for file in files)):\n curr_dir = root\n while curr_dir != dst_dir:\n init_path = os.path.join(curr_dir, '__init__.py')\n if not os.path.exists(init_path):\n open(init_path, 'w').close()\n curr_dir = os.path.dirname(curr_dir)", + "docstring": "Create __init__.py files.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\utils\\utils.py", + "ast_data": "FunctionDef name:create_init_files arg:dst_dir arguments arg For Call If Call BoolOp Call Call Assign While Compare Assign Call If Call Call Call Assign Call" + }, + { + "library": "tensorflow", + "name": "key_dtype", + "source_code": "@property\ndef key_dtype(self):\n return self._key_dtype", + "docstring": "The table key dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:key_dtype arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "get_context_data", + "source_code": "def get_context_data(self, **kwargs):\n return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}", + "docstring": "Return a dictionary to use as extra context if either `` are used. 
Default implementation preserves the old behavior of using {'obj': item, 'site': current_site} as the context.", + "type": "method", + "file_path": "django\\django\\contrib\\syndication\\views.py", + "ast_data": "FunctionDef name:get_context_data arg:self arguments arg arg Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "_color_to_rgb", + "source_code": "def _color_to_rgb(color, input):\n if input == 'hls':\n color = colorsys.hls_to_rgb(*color)\n elif input == 'husl':\n color = husl.husl_to_rgb(*color)\n color = tuple(np.clip(color, 0, 1))\n elif input == 'xkcd':\n color = xkcd_rgb[color]\n return mpl.colors.to_rgb(color)", + "docstring": "Add some more flexibility to color choices.", + "type": "function", + "file_path": "seaborn\\seaborn\\palettes.py", + "ast_data": "FunctionDef name:_color_to_rgb arg:color arg:input arguments arg arg If Compare Assign Call If Compare Assign Call Assign Call Call If Compare Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "clear", + "source_code": "def clear(self):\n self._checks.set_facecolor(['none'] * len(self._active_check_colors))\n if hasattr(self, '_lines'):\n for l1, l2 in self._lines:\n l1.set_visible(False)\n l2.set_visible(False)\n if self.drawon:\n self.canvas.draw()\n if self.eventson:\n self._observers.process('clicked', None)", + "docstring": "Uncheck all checkboxes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg Call Call If Call For Call Call If Call If Call" + }, + { + "library": "pytorch", + "name": "_time_estimator", + "source_code": "@contextlib.contextmanager\ndef _time_estimator(group: Optional[ProcessGroup]=None, device: Optional[torch.device]=None):\n group = group or _get_default_group()\n device = device or _get_pg_default_device(group)\n backend = group._get_backend(device)\n if not backend.supports_time_estimate:\n raise NotImplementedError(f'collective time estimator is not supported in the curent version of backend {backend}')\n backend._start_time_estimate()\n cm = _TimeEstimator()\n yield cm\n cm.estimated_time = backend._end_time_estimate()", + "docstring": "Context manager used to estimate time of collectives. Within the context manager, nothing is actually run and the backend just simulates the collective time only. Args: group (, optional): The process group to work on. If None, the default process group will be used. device (, optional): Default is None, set to a device if there isn't a implementation by the backend. Examples: >>> # xdoctest: +SKIP(\"no rank\") >>> # Synchronous ops >>> with _time_estimator() as cm: >>> for i in range(num_colls): >>> dist.all_reduce(tensors[i]) >>> # estimate time is stored in cm.estimated_time .. warning:: :func: currently only support NCCL backend but it can easily be extended to other backends. Also a NCCL communicator needs to be created because only with a real communicator can we do accurate estimation. The communicator internally has knowledge about the links it runs on (e.g. 
intra-node or inter-node, whether the links are NVLink or PCI-e or IB).", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:_time_estimator arg:group arg:device arguments arg arg Assign BoolOp Call Assign BoolOp Call Assign Call If Raise Call Call Assign Call Assign Call" + }, + { + "library": "scikit-learn", + "name": "__call__", + "source_code": "def __call__(self, iterable):\n config = get_config()\n warning_filters = warnings.filters\n iterable_with_config_and_warning_filters = ((_with_config_and_warning_filters(delayed_func, config, warning_filters), args, kwargs) for delayed_func, args, kwargs in iterable)\n return super().__call__(iterable_with_config_and_warning_filters)", + "docstring": "Dispatch the tasks and return the results. Parameters ---------- iterable : iterable Iterable containing tuples of (delayed_function, args, kwargs) that should be consumed. Returns ------- results : list List of results of the tasks.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\parallel.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:iterable arguments arg arg Assign Call Assign Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "ZerosLike", + "source_code": "def ZerosLike(op, index):\n if not util.IsSwitch(op):\n return _ZerosLikeV2(op, index)\n else:\n return _ZerosLikeV1(op, index)", + "docstring": "Create zeros_like for the specified output of an op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", + "ast_data": "FunctionDef name:ZerosLike arg:op arg:index arguments arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "is_django_path", + "source_code": "def is_django_path(path):\n return Path(django.__file__).parent in Path(path).parents", + "docstring": "Return True if the given file path is nested under Django.", + "type": "function", + "file_path": "django\\django\\utils\\autoreload.py", + "ast_data": "FunctionDef name:is_django_path arg:path arguments arg Return return:yes Compare Call Call" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y=None):\n return self", + "docstring": "Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : ndarray of shape [n_samples, n_features] Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object HashingVectorizer instance.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "pg_map", + "source_code": "@property\ndef pg_map(self) -> dict[ProcessGroup, tuple[str, Store]]:\n global _pg_map\n return _pg_map", + "docstring": "Provide Mapping from ProcessGroup to backend name and store. 
For NCCL and GLOO pg, it is a map from ProcessGroup to (Backend, Store) For MPI pg, it is a map from ProcessGroup to (Backend, None) TODO don't expose the map, expose fine grained ops", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:pg_map arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_check_color_like", + "source_code": "def _check_color_like(**kwargs):\n for k, v in kwargs.items():\n if not is_color_like(v):\n raise ValueError(f\"{v!r} is not a valid value for {k}: supported inputs are (r, g, b) and (r, g, b, a) 0-1 float tuples; '#rrggbb', '#rrggbbaa', '#rgb', '#rgba' strings; named color strings; string reprs of 0-1 floats for grayscale values; 'C0', 'C1', ... strings for colors of the color cycle; and pairs combining one of the above with an alpha value\")", + "docstring": "For each *key, value* pair in *kwargs*, check that *value* is color-like.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:_check_color_like arguments arg For Call If Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_convert_composite_tensor", + "source_code": "def _convert_composite_tensor(value, expected_type, path, context):\n if context == _ConversionContext.SPEC:\n if not (isinstance(value, type_spec.TypeSpec) and _issubclass(value.value_type, expected_type)):\n raise TypeError(f'{''.join(path)}: expected a TypeSpec for {expected_type.__name__!r}, got {type(value).__name__!r}')\n return value\n if not isinstance(value, expected_type):\n raise TypeError(f'{''.join(path)}: expected {expected_type.__name__!r}, got {type(value).__name__!r}')\n return value", + "docstring": "Converts to a value of type .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py", + "ast_data": "FunctionDef name:_convert_composite_tensor arg:value arg:expected_type arg:path arg:context arguments arg arg arg arg If Compare If BoolOp Call Call Raise Call Call Call Return return:yes If Call Raise Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_kl_dirichlet_dirichlet", + "source_code": "@kullback_leibler.RegisterKL(Dirichlet, Dirichlet)\ndef _kl_dirichlet_dirichlet(d1, d2, name=None):\n with ops.name_scope(name, 'kl_dirichlet_dirichlet', values=[d1.concentration, d2.concentration]):\n digamma_sum_d1 = math_ops.digamma(math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True))\n digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1\n concentration_diff = d1.concentration - d2.concentration\n return math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1) - special_math_ops.lbeta(d1.concentration) + special_math_ops.lbeta(d2.concentration)", + "docstring": "Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet. Args: d1: instance of a Dirichlet distribution object. d2: instance of a Dirichlet distribution object. name: (optional) Name to use for created operations. default is \"kl_dirichlet_dirichlet\". 
Returns: Batchwise KL(d1 || d2)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet.py", + "ast_data": "FunctionDef name:_kl_dirichlet_dirichlet arg:d1 arg:d2 arg:name arguments arg arg arg With Call Assign Call Call Assign Call Assign Return return:yes Call Call Call Call" + }, + { + "library": "pandas", + "name": "argsort", + "source_code": "def argsort(self, *, ascending: bool=True, kind: SortKind='quicksort', **kwargs) -> npt.NDArray[np.intp]:\n return super().argsort(ascending=ascending, kind=kind, **kwargs)", + "docstring": "Return the indices that would sort the Categorical. Missing values are sorted at the end. Parameters ---------- ascending : bool, default True Whether the indices should result in an ascending or descending sort. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm. **kwargs: passed through to :func:. Returns ------- np.ndarray[np.intp] See Also -------- numpy.ndarray.argsort Notes ----- While an ordering is applied to the category values, arg-sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Examples -------- >>> pd.Categorical([\"b\", \"b\", \"a\", \"c\"]).argsort() array([2, 0, 1, 3]) >>> cat = pd.Categorical( ... [\"b\", \"b\", \"a\", \"c\"], categories=[\"c\", \"b\", \"a\"], ordered=True ... ) >>> cat.argsort() array([3, 0, 1, 2]) Missing values are placed at the end >>> cat = pd.Categorical([2, None, 1]) >>> cat.argsort() array([2, 0, 1])", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:argsort arg:self arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_validate_all_indexes_accounted_for_in_provided_output", + "source_code": "def _validate_all_indexes_accounted_for_in_provided_output(self, saved_nodes_idxs: list[int], recomputable_node_idxs: list[int]) -> None:\n recomputable_node_idxs_set = set(recomputable_node_idxs)\n saved_nodes_idxs_set = set(saved_nodes_idxs)\n all_candidate_nodes_idxs = set(range(len(self._graph_info_provider.all_recomputable_banned_nodes)))\n assert len(recomputable_node_idxs_set.intersection(saved_nodes_idxs_set)) == 0, 'Saved nodes and recomputable nodes cannot have any overlaps'\n assert recomputable_node_idxs_set.union(saved_nodes_idxs_set) == all_candidate_nodes_idxs, 'All candidate nodes must be accounted for in the provided output'", + "docstring": "Validate that all indexes are accounted for in the provided output. 
This function checks that the union of saved nodes and recomputable nodes covers all candidate nodes without any overlaps.", + "type": "method", + "file_path": "pytorch\\torch\\_functorch\\_activation_checkpointing\\knapsack_evaluator.py", + "ast_data": "FunctionDef name:_validate_all_indexes_accounted_for_in_provided_output arg:self arg:saved_nodes_idxs arg:recomputable_node_idxs arguments arg arg arg Assign Call Assign Call Assign Call Call Call Compare Call Call Compare Call" + }, + { + "library": "django", + "name": "ContentFile", + "source_code": "class ContentFile(File):\n\n def __init__(self, content, name=None):\n stream_class = StringIO if isinstance(content, str) else BytesIO\n super().__init__(stream_class(content), name=name)\n self.size = len(content)\n\n def __str__(self):\n return 'Raw content'\n\n def __bool__(self):\n return True\n\n def open(self, mode=None):\n self.seek(0)\n return self\n\n def close(self):\n pass\n\n def write(self, data):\n self.__dict__.pop('size', None)\n return self.file.write(data)", + "docstring": "A File-like object that takes just raw content, rather than an actual file.", + "type": "class", + "file_path": "django\\django\\core\\files\\base.py", + "ast_data": "ClassDef name:ContentFile FunctionDef name:__init__ arg:self arg:content arg:name arguments arg arg arg Assign Call Call Call Call Assign Call FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:__bool__ arg:self arguments arg Return return:yes FunctionDef name:open arg:self arg:mode arguments arg arg Call Return return:yes FunctionDef name:close arg:self arguments arg FunctionDef name:write arg:self arg:data arguments arg arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "random_poisson", + "source_code": "@tf_export(v1=['random.poisson', 'random_poisson'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('random_poisson')\ndef random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):\n return random_poisson_v2(shape, lam, dtype, seed, name)", + "docstring": "Draws samples from each of the given Poisson distribution(s). is the rate parameter describing the distribution(s). Example: Args: lam: A Tensor or Python value or N-D array of type . provides the rate parameter(s) describing the poisson distribution(s) to sample. shape: A 1-D integer Tensor or Python array. The shape of the output samples to be drawn per \"rate\"-parameterized distribution. dtype: The type of the output: , , , or . seed: A Python integer. Used to create a random seed for the distributions. See for behavior. name: Optional name for the operation. Returns: samples: a of shape with values of type .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py", + "ast_data": "FunctionDef name:random_poisson arg:lam arg:shape arg:dtype arg:seed arg:name arguments arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "to_ss", + "source_code": "def to_ss(self):\n return copy.deepcopy(self)", + "docstring": "Return a copy of the current system. Returns ------- sys : instance of The current system (copy)", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:to_ss arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_check_X", + "source_code": "@abstractmethod\ndef _check_X(self, X):\n pass", + "docstring": "To be overridden in subclasses with the actual checks. 
Only used in predict* methods.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\naive_bayes.py", + "ast_data": "FunctionDef name:_check_X arg:self arg:X arguments arg arg" + }, + { + "library": "pytorch", + "name": "_Multinomial", + "source_code": "class _Multinomial(Constraint):\n is_discrete = True\n event_dim = 1\n\n def __init__(self, upper_bound):\n self.upper_bound = upper_bound\n\n def check(self, x):\n return (x >= 0).all(dim=-1) & (x.sum(dim=-1) <= self.upper_bound)", + "docstring": "Constrain to nonnegative integer values summing to at most an upper bound. Note due to limitations of the Multinomial distribution, this currently checks the weaker condition ``.", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:_Multinomial Assign Assign FunctionDef name:__init__ arg:self arg:upper_bound arguments arg arg Assign FunctionDef name:check arg:self arg:x arguments arg arg Return return:yes Call Compare Compare Call" + }, + { + "library": "pytorch", + "name": "generate_return_type_definition_and_registrations", + "source_code": "def generate_return_type_definition_and_registrations(overloads: Sequence[PythonSignatureNativeFunctionPair]) -> tuple[list[str], list[str]]:\n typenames: dict[str, str] = {}\n definitions: list[str] = []\n registrations: list[str] = []\n for overload in overloads:\n fieldnames = structseq_fieldnames(overload.function.func.returns)\n if not fieldnames:\n continue\n fields = ', '.join((f'{{\"{fn}\", \"\"}}' for fn in fieldnames))\n name = cpp.name(overload.function.func)\n tn_key = gen_structseq_typename_key(overload.function)\n typename = typenames.get(tn_key)\n if typename is None:\n typename = f'{name}NamedTuple{('' if not definitions else len(definitions))}'\n typenames[tn_key] = typename\n definitions.append(f'PyTypeObject* get_{name}_structseq() {{\\n static PyStructSequence_Field NamedTuple_fields[] = {{ {fields}, {{nullptr}} }};\\n static PyTypeObject {typename};\\n static bool is_initialized = false;\\n static PyStructSequence_Desc desc = {{ \"torch.return_types.{name}\", nullptr, NamedTuple_fields, {len(fieldnames)} }};\\n if (!is_initialized) {{\\n PyStructSequence_InitType(&{typename}, &desc);\\n {typename}.tp_repr = (reprfunc)torch::utils::returned_structseq_repr;\\n is_initialized = true;\\n }}\\n return &{typename};\\n}}\\n')\n registrations.append(f'addReturnType(return_types_module, \"{name}\", generated::get_{name}_structseq());')\n return (definitions, registrations)", + "docstring": "Generate block of function in to initialize and return named tuple for a native function which returns named tuple and registration invocations in same file.", + "type": "function", + "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py", + "ast_data": "FunctionDef name:generate_return_type_definition_and_registrations arg:overloads arguments arg For Assign Call If Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "initialize_all_tables", + "source_code": "@tf_export(v1=['initialize_all_tables'])\n@deprecated(None, 'Use `tf.tables_initializer` instead.')\ndef initialize_all_tables(name='init_all_tables'):\n return tables_initializer(name)", + "docstring": "Returns an Op that initializes all tables of the default graph. Args: name: Optional name for the initialization op. Returns: An Op that initializes all tables. 
Note that if there are not tables the returned Op is a NoOp.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:initialize_all_tables arg:name arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "clone", + "source_code": "def clone(self):\n _warn_typed_storage_removal()\n return self._new_wrapped_storage(self._untyped_storage.clone())", + "docstring": "Return a copy of this storage.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:clone arg:self arguments arg Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "reproject_disparity_to_3D", + "source_code": "def reproject_disparity_to_3D(self, disparity_tensor: Tensor) -> Tensor:\n return reproject_disparity_to_3D(disparity_tensor, self.Q)", + "docstring": "Reproject the disparity tensor to a 3D point cloud. Args: disparity_tensor: Disparity tensor of shape :math:. Returns: The 3D point cloud of shape :math:", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py", + "ast_data": "FunctionDef name:reproject_disparity_to_3D arg:self arg:disparity_tensor arguments arg arg Return return:yes Call" + }, + { + "library": "pygame", + "name": "make_surface", + "source_code": "def make_surface(array):\n if isinstance(array, numpy_ndarray) and array.dtype in numpy_floats:\n array = array.round(0).astype(numpy_uint32)\n return pix_make_surface(array)", + "docstring": "pygame.surfarray.make_surface (array): return Surface Copy an array to a new surface. Create a new Surface that best resembles the data and format on the array. The array can be 2D or 3D with any sized integer values.", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:make_surface arg:array arguments arg If BoolOp Call Compare Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "apply_transform_keypoint", + "source_code": "def apply_transform_keypoint(self, input: Keypoints, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Keypoints:\n padding_size = params['padding_size'].to(device=input.device)\n input = input.pad(padding_size)\n return super().apply_transform_keypoint(input=input, params=params, flags=flags, transform=transform)", + "docstring": "Process keypoints corresponding to the inputs that are no transformation applied.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\crop.py", + "ast_data": "FunctionDef name:apply_transform_keypoint arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_get_intermediates", + "source_code": "def _get_intermediates(func_graph):\n intermediates = []\n reverse_captures = dict(((v.ref(), k) for k, v in func_graph.captures))\n for op in func_graph.get_operations():\n if op.type == 'Identity':\n continue\n if op.type == 'MutexLock':\n continue\n for o in op.outputs:\n if o is not func_graph.inputs[0] and o.dtype != dtypes.resource and (_get_accumulator(o) is None) and (o.ref() not in reverse_captures):\n intermediates.append(o)\n return intermediates", + "docstring": "Returns all tensors in that should be accumulated.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py", + "ast_data": "FunctionDef name:_get_intermediates 
arg:func_graph arguments arg Assign Assign Call Call For Call If Compare If Compare For If BoolOp Compare Compare Compare Call Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_use_composite_impl", + "source_code": "def _use_composite_impl(fast, tensor_shape):\n if fast is False:\n return False\n batch_shape = tensor_shape[:-2]\n matrix_shape = tensor_shape[-2:]\n if not tensor_shape.is_fully_defined():\n return True\n tensor_size = tensor_shape.num_elements() * matrix.dtype.size\n is_io_bound = batch_shape.num_elements() > np.min(matrix_shape)\n L2_CACHE_SIZE_GUESSTIMATE = 256000\n if tensor_size > L2_CACHE_SIZE_GUESSTIMATE and is_io_bound:\n return False\n else:\n return True", + "docstring": "Determines whether to use the composite or specialized CPU kernel. When the total size of the tensor is larger than the cache size and the batch size is large compared to the smallest matrix dimension, then the composite implementation is inefficient since it has to read the entire tensor from memory multiple times. In this case we fall back to the original CPU kernel, which does all the computational steps on each matrix separately. Only fast mode is supported by the composite impl, so is returned if is . Args: fast: bool indicating if fast mode in the solver was requested. tensor_shape: The shape of the tensor. Returns: True if the composite impl should be used. False otherwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py", + "ast_data": "FunctionDef name:_use_composite_impl arg:fast arg:tensor_shape arguments arg arg If Compare Return return:yes Assign Assign If Call Return return:yes Assign Call Assign Compare Call Call Assign If BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "unselect", + "source_code": "def unselect(self):\n if sys.platform == 'win32':\n self.dc.SelectObject(wx.NullBitmap)\n self.IsSelected = False", + "docstring": "Select a Null bitmap into this wxDC instance.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py", + "ast_data": "FunctionDef name:unselect arg:self arguments arg If Compare Call Assign" + }, + { + "library": "authlib", + "name": "plaintext_signature", + "source_code": "def plaintext_signature(client_secret, token_secret):\n signature = escape(client_secret or '')\n signature += '&'\n signature += escape(token_secret or '')\n return signature", + "docstring": "Generate signature via PLAINTEXT method, per _. The \"PLAINTEXT\" method does not employ a signature algorithm. It MUST be used with a transport-layer mechanism such as TLS or SSL (or sent over a secure channel with equivalent protections). It does not utilize the signature base string or the \"oauth_timestamp\" and \"oauth_nonce\" parameters. .. _:", + "type": "function", + "file_path": "authlib\\authlib\\oauth1\\rfc5849\\signature.py", + "ast_data": "FunctionDef name:plaintext_signature arg:client_secret arg:token_secret arguments arg arg Assign Call BoolOp Call BoolOp Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_rgba", + "source_code": "@classmethod\ndef get_rgba(cls, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)):\n alpha = cls.get_grey(tex, fontsize, dpi)\n rgba = np.empty((*alpha.shape, 4))\n rgba[..., :3] = mpl.colors.to_rgb(rgb)\n rgba[..., -1] = alpha\n return rgba", + "docstring": "Return latex's rendering of the tex string as an RGBA array. 
Examples -------- >>> texmanager = TexManager() >>> s = r\"\\TeX\\ is $\\displaystyle\\sum_n\\frac{-e^{i\\pi}}{2^n}$!\" >>> Z = texmanager.get_rgba(s, fontsize=12, dpi=80, rgb=(1, 0, 0))", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py", + "ast_data": "FunctionDef name:get_rgba arg:cls arg:tex arg:fontsize arg:dpi arg:rgb arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_squared_difference_flops", + "source_code": "@ops.RegisterStatistics('SquaredDifference', 'flops')\ndef _squared_difference_flops(graph, node):\n return _binary_per_element_op_flops(graph, node, ops_per_element=2)", + "docstring": "Compute flops for SquaredDifference operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_squared_difference_flops arg:graph arg:node arguments arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "upload_interrupted", + "source_code": "def upload_interrupted(self):\n pass", + "docstring": "Signal that the upload was interrupted. Subclasses should perform cleanup that is necessary for this handler.", + "type": "method", + "file_path": "django\\django\\core\\files\\uploadhandler.py", + "ast_data": "FunctionDef name:upload_interrupted arg:self arguments arg" + }, + { + "library": "numpy", + "name": "legpow", + "source_code": "def legpow(c, pow, maxpower=16):\n return pu._pow(legmul, c, pow, maxpower)", + "docstring": "Raise a Legendre series to a power. Returns the Legendre series raised to the power . The argument is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series `` Parameters ---------- c : array_like 1-D array of Legendre series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Legendre series of power. 
See Also -------- legadd, legsub, legmulx, legmul, legdiv", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:legpow arg:c arg:pow arg:maxpower arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "stop_recording", + "source_code": "@contextlib.contextmanager\ndef stop_recording():\n is_stopped = pywrap_tfe.TFE_Py_TapeSetIsStopped()\n try:\n if not is_stopped:\n pywrap_tfe.TFE_Py_TapeSetStopOnThread()\n yield\n finally:\n if not is_stopped:\n pywrap_tfe.TFE_Py_TapeSetRestartOnThread()", + "docstring": "Stop all gradient recording (backprop and forwardprop).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py", + "ast_data": "FunctionDef name:stop_recording arguments Assign Call Try If Call If Call" + }, + { + "library": "sphinx", + "name": "new_navpoint", + "source_code": "def new_navpoint(self, node: dict[str, Any], level: int, incr: bool=True) -> NavPoint:\n if incr:\n self.playorder += 1\n self.tocid += 1\n return NavPoint(f'navPoint{self.tocid}', self.playorder, node['text'], node['refuri'], [])", + "docstring": "Create a new entry in the toc from the node at given level.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", + "ast_data": "FunctionDef name:new_navpoint arg:self arg:node arg:level arg:incr arguments arg arg arg arg If Return return:yes Call" + }, + { + "library": "django", + "name": "from_dict", + "source_code": "@classmethod\ndef from_dict(cls, file_dict):\n return cls(file_dict['filename'], file_dict['content'], file_dict.get('content-type', 'text/plain'))", + "docstring": "Create a SimpleUploadedFile object from a dictionary with keys: - filename - content-type - content", + "type": "method", + "file_path": "django\\django\\core\\files\\uploadedfile.py", + "ast_data": "FunctionDef name:from_dict arg:cls arg:file_dict arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_MatrixInverseGrad", + "source_code": "@ops.RegisterGradient('MatrixInverse')\ndef _MatrixInverseGrad(op: ops.Operation, grad):\n ainv = op.outputs[0]\n op_adjoint = op.get_attr('adjoint')\n return -math_ops.matmul(ainv, math_ops.matmul(grad, ainv, adjoint_a=op_adjoint, adjoint_b=not op_adjoint), adjoint_a=not op_adjoint)", + "docstring": "Gradient for MatrixInverse.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_MatrixInverseGrad arg:op arg:grad arguments arg arg Assign Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "StrategyV1", + "source_code": "@tf_export(v1=['distribute.Strategy'])\nclass StrategyV1(StrategyBase):\n\n def make_dataset_iterator(self, dataset):\n return self._extended._make_dataset_iterator(dataset)\n\n def make_input_fn_iterator(self, input_fn, replication_mode=InputReplicationMode.PER_WORKER):\n return super(StrategyV1, self).make_input_fn_iterator(input_fn, replication_mode)\n\n def experimental_make_numpy_dataset(self, numpy_input, session=None):\n return self.extended.experimental_make_numpy_dataset(numpy_input, session=session)\n\n @deprecated(None, 'This method is not available in TF 2.x. 
Please switch to using `run` instead.')\n def experimental_run(self, fn, input_iterator=None):\n return super(StrategyV1, self).experimental_run(fn, input_iterator)\n\n def reduce(self, reduce_op, value, axis=None):\n return super(StrategyV1, self).reduce(reduce_op, value, axis)\n reduce.__doc__ = StrategyBase.reduce.__doc__\n\n def update_config_proto(self, config_proto):\n return self._extended._update_config_proto(config_proto)", + "docstring": "A list of devices with a state & compute distribution policy. See [the guide]( for overview and examples. Note: Not all implementations currently support TensorFlow's partitioned variables (where a single variable is split across multiple devices) at this time.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "ClassDef name:StrategyV1 FunctionDef name:make_dataset_iterator arg:self arg:dataset arguments arg arg Return return:yes Call FunctionDef name:make_input_fn_iterator arg:self arg:input_fn arg:replication_mode arguments arg arg arg Return return:yes Call Call FunctionDef name:experimental_make_numpy_dataset arg:self arg:numpy_input arg:session arguments arg arg arg Return return:yes Call FunctionDef name:experimental_run arg:self arg:fn arg:input_iterator arguments arg arg arg Return return:yes Call Call Call FunctionDef name:reduce arg:self arg:reduce_op arg:value arg:axis arguments arg arg arg arg Return return:yes Call Call Assign FunctionDef name:update_config_proto arg:self arg:config_proto arguments arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "hypsecant_gen", + "source_code": "class hypsecant_gen(rv_continuous):\n\n def _shape_info(self):\n return []\n\n def _pdf(self, x):\n return 1.0 / (np.pi * np.cosh(x))\n\n def _cdf(self, x):\n return 2.0 / np.pi * np.arctan(np.exp(x))\n\n def _ppf(self, q):\n return np.log(np.tan(np.pi * q / 2.0))\n\n def _sf(self, x):\n return 2.0 / np.pi * np.arctan(np.exp(-x))\n\n def _isf(self, q):\n return -np.log(np.tan(np.pi * q / 2.0))\n\n def _stats(self):\n return (0, np.pi * np.pi / 4, 0, 2)\n\n def _entropy(self):\n return np.log(2 * np.pi)", + "docstring": "A hyperbolic secant continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x) = \\frac{1}{\\pi} \\text{sech}(x) for a real number :math:. %(after_notes)s %(example)s", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "ClassDef name:hypsecant_gen FunctionDef name:_shape_info arg:self arguments arg Return return:no FunctionDef name:_pdf arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_cdf arg:self arg:x arguments arg arg Return return:yes Call Call FunctionDef name:_ppf arg:self arg:q arguments arg arg Return return:yes Call Call FunctionDef name:_sf arg:self arg:x arguments arg arg Return return:yes Call Call FunctionDef name:_isf arg:self arg:q arguments arg arg Return return:yes Call Call FunctionDef name:_stats arg:self arguments arg Return return:yes FunctionDef name:_entropy arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "ClosedFileError", + "source_code": "class ClosedFileError(Exception):\n pass", + "docstring": "Exception is raised when trying to perform an operation on a closed HDFStore file. `` objects. Once an HDFStore is closed, its resources are no longer available, and any further attempt to access data or perform file operations will raise this exception. 
See Also -------- HDFStore.close : Closes the PyTables file handle. HDFStore.open : Opens the file in the specified mode. HDFStore.is_open : Returns a boolean indicating whether the file is open. Examples -------- >>> store = pd.HDFStore(\"my-store\", \"a\") # doctest: +SKIP >>> store.close() # doctest: +SKIP >>> store.keys() # doctest: +SKIP ... # ClosedFileError: my-store file is not open!", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:ClosedFileError" + }, + { + "library": "matplotlib", + "name": "_on_paint", + "source_code": "def _on_paint(self, event):\n _log.debug('%s - _on_paint()', type(self))\n drawDC = wx.PaintDC(self)\n if not self._isDrawn:\n self.draw(drawDC=drawDC)\n else:\n self.gui_repaint(drawDC=drawDC)\n drawDC.Destroy()", + "docstring": "Called when wxPaintEvt is generated.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py", + "ast_data": "FunctionDef name:_on_paint arg:self arg:event arguments arg arg Call Call Assign Call If Call Call Call" + }, + { + "library": "pandas", + "name": "_convert_level_number", + "source_code": "def _convert_level_number(level_num: int, columns: Index):\n if level_num in columns.names:\n return columns.names[level_num]\n return level_num", + "docstring": "Logic for converting the level number to something we can safely pass to swaplevel. If matches a column name return the name from position , otherwise return .", + "type": "function", + "file_path": "pandas\\pandas\\core\\reshape\\reshape.py", + "ast_data": "FunctionDef name:_convert_level_number arg:level_num arg:columns arguments arg arg If Compare Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "get_template", + "source_code": "def get_template(self, template_name):\n raise NotImplementedError('subclasses of BaseEngine must provide a get_template() method')", + "docstring": "Load and return a template for the given name. 
Raise TemplateDoesNotExist if no such template exists.", + "type": "method", + "file_path": "django\\django\\template\\backends\\base.py", + "ast_data": "FunctionDef name:get_template arg:self arg:template_name arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "NoneTensor", + "source_code": "class NoneTensor(composite_tensor.CompositeTensor):\n\n @property\n def _type_spec(self):\n return NoneTensorSpec()", + "docstring": "Composite tensor representation for value.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\none_tensor.py", + "ast_data": "ClassDef name:NoneTensor FunctionDef name:_type_spec arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "find_coalesced_group_with_non_p2p", + "source_code": "def find_coalesced_group_with_non_p2p(pg_name: str, entries: list[dict[str, Any]], _pg_guids: dict[tuple[str, int], str], rank: int) -> list[tuple[int, dict[str, Any]]]:\n found = []\n collective_seq_id = None\n for i, e in enumerate(entries):\n if _pg_guids[e['process_group'][0], rank] != pg_name:\n continue\n elif collective_seq_id is None:\n collective_seq_id = e['p2p_seq_id'] if e['is_p2p'] else e['collective_seq_id']\n found.append((i, e))\n elif not e['is_p2p'] and e['collective_seq_id'] == collective_seq_id:\n found.append((i, e))\n elif e['is_p2p'] and e['p2p_seq_id'] == collective_seq_id:\n found.append((i, e))\n else:\n break\n if len(found) > 1:\n name = found[-1][1]['profiling_name']\n if name.startswith('nccl:') and (not name.endswith('_coalesced')):\n logger.error('Rank %s does not have a coalesced end.', rank)\n return found\n return []", + "docstring": "Given a list of entries, if the collective_seq_id of the first entry matches that of subsequent ones, build an return a list of entries terminating in a 'coalesced' op entry all sharing a collective_seq_id", + "type": "function", + "file_path": "pytorch\\tools\\flight_recorder\\components\\utils.py", + "ast_data": "FunctionDef name:find_coalesced_group_with_non_p2p arg:pg_name arg:entries arg:_pg_guids arg:rank arguments arg arg arg arg Assign Assign For Call If Compare If Compare Assign Call If BoolOp Compare Call If BoolOp Compare Call If Compare Call Assign If BoolOp Call Call Call Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "colocate_with", + "source_code": "@tf_contextlib.contextmanager\ndef colocate_with(self, op, ignore_existing=False) -> Iterator[None]:\n if op is None and (not ignore_existing):\n raise ValueError('Trying to reset colocation (op is None) but ignore_existing is not True')\n op, device_only_candidate = _op_to_colocate_with(op, self)\n device_fn_tmp = self._device_function_stack\n self._device_function_stack = traceable_stack.TraceableStack()\n if ignore_existing:\n current_stack = self._colocation_stack\n self._colocation_stack = traceable_stack.TraceableStack()\n if op is not None:\n self._colocation_stack.push_obj(op, offset=4)\n if device_only_candidate is not None:\n self._colocation_stack.push_obj(device_only_candidate, offset=4)\n elif not ignore_existing:\n raise ValueError('Trying to reset colocation (op is None) but ignore_existing is not True')\n try:\n yield\n finally:\n self._device_function_stack = device_fn_tmp\n if op is not None:\n self._colocation_stack.pop_obj()\n if device_only_candidate is not None:\n self._colocation_stack.pop_obj()\n if ignore_existing:\n self._colocation_stack = current_stack", + "docstring": "Returns a context manager that specifies 
an op to colocate with. Note: this function is not for public use, only for internal libraries. For example: and will always be colocated with , no matter where is eventually placed. **NOTE** Using a colocation scope resets any existing device constraints. If is then must be and the new scope resets all colocation and device constraints. Args: op: The op to colocate all created ops with, or . ignore_existing: If true, only applies colocation of this op within the context, rather than applying all colocation properties on the stack. If is , this value must be . Raises: ValueError: if op is None but ignore_existing is False. Yields: A context manager that specifies the op with which to colocate newly created ops.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:colocate_with arg:self arg:op arg:ignore_existing arguments arg arg arg If BoolOp Compare Raise Call Assign Call Assign Assign Call If Assign Assign Call If Compare Call If Compare Call If Raise Call Try Assign If Compare Call If Compare Call If Assign" + }, + { + "library": "tensorflow", + "name": "swap_tensor_content_in_graph_function", + "source_code": "def swap_tensor_content_in_graph_function(graph_def, from_endiness, to_endiness):\n if isinstance(graph_def, meta_graph_pb2.MetaGraphDef):\n functions = graph_def.graph_def.library.function\n elif isinstance(graph_def, graph_pb2.GraphDef):\n functions = graph_def.library.function\n else:\n return\n for function in functions:\n node_def = function.node_def\n for node in node_def:\n if node.op == 'Const':\n tensor = node.attr['value'].tensor\n byte_swap_tensor_content(tensor, from_endiness, to_endiness)", + "docstring": "Fix endiness of tensor contents. Args: graph_def: Target graph_def to change endiness. from_endiness: The original endianness format. \"big\" or \"little\" to_endiness: The target endianness format. 
\"big\" or \"little\"", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\byte_swap_tensor.py", + "ast_data": "FunctionDef name:swap_tensor_content_in_graph_function arg:graph_def arg:from_endiness arg:to_endiness arguments arg arg arg If Call Assign If Call Assign Return return:no For Assign For If Compare Assign Call" + }, + { + "library": "matplotlib", + "name": "Formatter", + "source_code": "class Formatter(TickHelper):\n locs = []\n\n def __call__(self, x, pos=None):\n raise NotImplementedError('Derived must override')\n\n def format_ticks(self, values):\n self.set_locs(values)\n return [self(value, i) for i, value in enumerate(values)]\n\n def format_data(self, value):\n return self.__call__(value)\n\n def format_data_short(self, value):\n return self.format_data(value)\n\n def get_offset(self):\n return ''\n\n def set_locs(self, locs):\n self.locs = locs\n\n @staticmethod\n def fix_minus(s):\n return s.replace('-', '−') if mpl.rcParams['axes.unicode_minus'] else s\n\n def _set_locator(self, locator):\n pass", + "docstring": "Create a string based on a tick value and location.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "ClassDef name:Formatter Assign FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Raise Call FunctionDef name:format_ticks arg:self arg:values arguments arg arg Call Return return:yes Call Call FunctionDef name:format_data arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:format_data_short arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:get_offset arg:self arguments arg Return return:yes FunctionDef name:set_locs arg:self arg:locs arguments arg arg Assign FunctionDef name:fix_minus arg:s arguments arg Return return:yes Call FunctionDef name:_set_locator arg:self arg:locator arguments arg arg" + }, + { + "library": "tensorflow", + "name": "_variable_call", + "source_code": "@classmethod\ndef _variable_call(cls, initial_value=None, trainable=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, import_scope=None, constraint=None, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE, shape=None, experimental_enable_variable_lifting=None, expected_shape=None, collections=None, use_resource=None, **kwargs):\n if cls is not VariableV1:\n return None\n previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n for _, getter in ops.get_default_graph()._variable_creator_stack:\n previous_getter = variables._make_getter(getter, previous_getter)\n if aggregation is None:\n aggregation = variables.VariableAggregation.NONE\n return previous_getter(initial_value=initial_value, trainable=trainable, validate_shape=validate_shape, caching_device=caching_device, name=name, variable_def=variable_def, dtype=dtype, import_scope=import_scope, constraint=constraint, synchronization=synchronization, aggregation=aggregation, shape=shape, experimental_enable_variable_lifting=experimental_enable_variable_lifting, expected_shape=expected_shape, collections=collections, use_resource=use_resource)", + "docstring": "VariableV1 class getter. 
Useful to force the signature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_v1.py", + "ast_data": "FunctionDef name:_variable_call arg:cls arg:initial_value arg:trainable arg:validate_shape arg:caching_device arg:name arg:variable_def arg:dtype arg:import_scope arg:constraint arg:synchronization arg:aggregation arg:shape arg:experimental_enable_variable_lifting arg:expected_shape arg:collections arg:use_resource arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Compare Return return:no Assign arguments arg Call For Call Assign Call If Compare Assign Return return:yes Call" + }, + { + "library": "seaborn", + "name": "__contains__", + "source_code": "def __contains__(self, key: str) -> bool:\n if self.frame is None:\n return any((key in df for df in self.frames.values()))\n return key in self.frame", + "docstring": "Boolean check on whether a variable is defined in this dataset.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\data.py", + "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg If Compare Return return:yes Call Compare Call Return return:yes Compare" + }, + { + "library": "kornia", + "name": "sobel", + "source_code": "def sobel(input: Tensor, normalized: bool=True, eps: float=1e-06) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(input)\n KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n edges: Tensor = spatial_gradient(input, normalized=normalized)\n gx: Tensor = edges[:, :, 0]\n gy: Tensor = edges[:, :, 1]\n magnitude: Tensor = torch.sqrt(gx * gx + gy * gy + eps)\n return magnitude", + "docstring": "Compute the Sobel operator and returns the magnitude per channel. .. image:: _static/img/sobel.png Args: input: the input image with shape :math:. normalized: if True, L1 norm of the kernel is set to 1. eps: regularization number to avoid NaN during backprop. Return: the sobel edge gradient magnitudes map with shape :math:. .. note:: See a working example __. Example: >>> input = torch.rand(1, 3, 4, 4) >>> output = sobel(input) # 1x3x4x4 >>> output.shape torch.Size([1, 3, 4, 4])", + "type": "function", + "file_path": "kornia\\kornia\\filters\\sobel.py", + "ast_data": "FunctionDef name:sobel arg:input arg:normalized arg:eps arguments arg arg arg Call Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "BlendedAffine2D", + "source_code": "class BlendedAffine2D(_BlendedMixin, Affine2DBase):\n is_separable = True\n\n def __init__(self, x_transform, y_transform, **kwargs):\n is_affine = x_transform.is_affine and y_transform.is_affine\n is_separable = x_transform.is_separable and y_transform.is_separable\n is_correct = is_affine and is_separable\n if not is_correct:\n raise ValueError('Both *x_transform* and *y_transform* must be 2D affine transforms')\n Transform.__init__(self, **kwargs)\n self._x = x_transform\n self._y = y_transform\n self.set_children(x_transform, y_transform)\n Affine2DBase.__init__(self)\n self._mtx = None\n\n def get_matrix(self):\n if self._invalid:\n if self._x == self._y:\n self._mtx = self._x.get_matrix()\n else:\n x_mtx = self._x.get_matrix()\n y_mtx = self._y.get_matrix()\n self._mtx = np.array([x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]])\n self._inverted = None\n self._invalid = 0\n return self._mtx", + "docstring": "A \"blended\" transform uses one transform for the *x*-direction, and another transform for the *y*-direction. 
This version is an optimization for the case where both child transforms are of type .", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "ClassDef name:BlendedAffine2D Assign FunctionDef name:__init__ arg:self arg:x_transform arg:y_transform arguments arg arg arg arg Assign BoolOp Assign BoolOp Assign BoolOp If Raise Call Call Assign Assign Call Call Assign FunctionDef name:get_matrix arg:self arguments arg If If Compare Assign Call Assign Call Assign Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "_extend_upper", + "source_code": "def _extend_upper(self):\n minmax = 'min' if self.long_axis.get_inverted() else 'max'\n return self.extend in ('both', minmax)", + "docstring": "Return whether the upper limit is open ended.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", + "ast_data": "FunctionDef name:_extend_upper arg:self arguments arg Assign Call Return return:yes Compare" + }, + { + "library": "pandas", + "name": "_is_label_or_level_reference", + "source_code": "@final\ndef _is_label_or_level_reference(self, key: Level, axis: AxisInt=0) -> bool:\n return self._is_level_reference(key, axis=axis) or self._is_label_reference(key, axis=axis)", + "docstring": "Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:_is_label_or_level_reference arg:self arg:key arg:axis arguments arg arg arg Return return:yes BoolOp Call Call" + }, + { + "library": "scikit-learn", + "name": "get_headers", + "source_code": "def get_headers(token):\n return {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}', 'X-GitHub-Api-Version': '2022-11-28'}", + "docstring": "Get the headers for the GitHub API.", + "type": "function", + "file_path": "scikit-learn\\build_tools\\get_comment.py", + "ast_data": "FunctionDef name:get_headers arg:token arguments arg Return return:yes" + }, + { + "library": "django", + "name": "W", + "source_code": "def W(self):\n return self.data.isocalendar().week", + "docstring": "ISO-8601 week number of year, weeks starting on Monday", + "type": "method", + "file_path": "django\\django\\utils\\dateformat.py", + "ast_data": "FunctionDef name:W arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "replace_random_passes", + "source_code": "def replace_random_passes(gm: torch.fx.GraphModule):\n if config.fallback_random:\n return 0\n count = patterns.apply(gm)\n with GraphTransformObserver(gm, 'fuse_seed_creation_pass'):\n count += fuse_seed_creation_pass(gm.graph)\n return count", + "docstring": "Modify the given FX graph to use backend-native random ops", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\replace_random.py", + "ast_data": "FunctionDef name:replace_random_passes arg:gm arguments arg If Return return:yes Assign Call With Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_normplot", + "source_code": "def _normplot(method, x, la, lb, plot=None, N=80):\n if method == 'boxcox':\n title = 
'Box-Cox Normality Plot'\n transform_func = boxcox\n else:\n title = 'Yeo-Johnson Normality Plot'\n transform_func = yeojohnson\n x = np.asarray(x)\n if x.size == 0:\n return x\n if lb <= la:\n raise ValueError('`lb` has to be larger than `la`.')\n if method == 'boxcox' and np.any(x <= 0):\n raise ValueError('Data must be positive.')\n lmbdas = np.linspace(la, lb, num=N)\n ppcc = lmbdas * 0.0\n for i, val in enumerate(lmbdas):\n z = transform_func(x, lmbda=val)\n _, (_, _, r) = probplot(z, dist='norm', fit=True)\n ppcc[i] = r\n if plot is not None:\n plot.plot(lmbdas, ppcc, 'x')\n _add_axis_labels_title(plot, xlabel='$\\\\lambda$', ylabel='Prob Plot Corr. Coef.', title=title)\n return (lmbdas, ppcc)", + "docstring": "Compute parameters for a Box-Cox or Yeo-Johnson normality plot, optionally show it. See or for details.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_morestats.py", + "ast_data": "FunctionDef name:_normplot arg:method arg:x arg:la arg:lb arg:plot arg:N arguments arg arg arg arg arg arg If Compare Assign Assign Assign Assign Assign Call If Compare Return return:yes If Compare Raise Call If BoolOp Compare Call Compare Raise Call Assign Call Assign For Call Assign Call Assign Call Assign If Compare Call Call Return return:yes" + }, + { + "library": "django", + "name": "__contains__", + "source_code": "def __contains__(self, key):\n return self.has_key(key)", + "docstring": "Return True if the key is in the cache and has not expired.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X)", + "docstring": "Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. y : any Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- X : sparse matrix of shape (n_samples, n_features) Document-term matrix.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "argsreduce", + "source_code": "def argsreduce(cond, *args):\n newargs = np.atleast_1d(*args)\n if not isinstance(newargs, list | tuple):\n newargs = (newargs,)\n if np.all(cond):\n *newargs, cond = np.broadcast_arrays(*newargs, cond)\n return [arg.ravel() for arg in newargs]\n s = cond.shape\n return [arg if np.size(arg) == 1 else np.extract(cond, np.broadcast_to(arg, s)) for arg in newargs]", + "docstring": "Clean arguments to: 1. Ensure all arguments are iterable (arrays of dimension at least one 2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is True, in 1D. Return list of processed arguments. 
Examples -------- >>> import numpy as np >>> from scipy.stats._distn_infrastructure import argsreduce >>> rng = np.random.default_rng() >>> A = rng.random((4, 5)) >>> B = 2 >>> C = rng.random((1, 5)) >>> cond = np.ones(A.shape) >>> [A1, B1, C1] = argsreduce(cond, A, B, C) >>> A1.shape (4, 5) >>> B1.shape (1,) >>> C1.shape (1, 5) >>> cond[2,:] = 0 >>> [A1, B1, C1] = argsreduce(cond, A, B, C) >>> A1.shape (15,) >>> B1.shape (1,) >>> C1.shape (15,)", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:argsreduce arg:cond arguments arg arg Assign Call If Call Assign If Call Assign Call Return return:yes Call Assign Return return:yes Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "mark_dirty", + "source_code": "@abstractmethod\ndef mark_dirty(self) -> None:\n pass", + "docstring": "Mark the local state as dirty.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py", + "ast_data": "FunctionDef name:mark_dirty arg:self arguments arg" + }, + { + "library": "sphinx", + "name": "DoctestTransform", + "source_code": "class DoctestTransform(SphinxTransform):\n default_priority = 500\n\n def apply(self, **kwargs: Any) -> None:\n for node in self.document.findall(nodes.doctest_block):\n node['classes'].append('doctest')", + "docstring": "Set \"doctest\" style to each doctest_block node", + "type": "class", + "file_path": "sphinx\\sphinx\\transforms\\__init__.py", + "ast_data": "ClassDef name:DoctestTransform Assign FunctionDef name:apply arg:self arguments arg arg For Call Call" + }, + { + "library": "scipy", + "name": "_root_scalar_newton_doc", + "source_code": "def _root_scalar_newton_doc():\n pass", + "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function and its derivative. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. x0 : float, required Initial guess. fprime : bool or callable, optional If is a boolean and is True, is assumed to return the value of derivative along with the objective function. can also be a callable returning the derivative of . In this case, it must accept the same arguments as . options: dict, optional Specifies any method-specific options not covered above.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_root_scalar.py", + "ast_data": "FunctionDef name:_root_scalar_newton_doc arguments" + }, + { + "library": "pandas", + "name": "nlargest", + "source_code": "def nlargest(self, n: int=5, keep: Literal['first', 'last', 'all']='first') -> Series:\n return selectn.SelectNSeries(self, n=n, keep=keep).nlargest()", + "docstring": "Return the largest elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of elements: - `nnnnnnnnnkeepnn` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. 
>>> s.nlargest(3, keep=\"all\") France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:nlargest arg:self arg:n arg:keep arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "debug_op", + "source_code": "@property\ndef debug_op(self):\n return self._debug_op", + "docstring": "Name of the debug op. Returns: () debug op name (e.g., ).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:debug_op arg:self arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "herme2poly", + "source_code": "def herme2poly(c):\n from .polynomial import polyadd, polymulx, polysub\n [c] = pu.as_series([c])\n n = len(c)\n if n == 1:\n return c\n if n == 2:\n return c\n else:\n c0 = c[-2]\n c1 = c[-1]\n for i in range(n - 1, 1, -1):\n tmp = c0\n c0 = polysub(c[i - 2], c1 * (i - 1))\n c1 = polyadd(tmp, polymulx(c1))\n return polyadd(c0, polymulx(c1))", + "docstring": "Convert a Hermite series to a polynomial. Convert an array representing the coefficients of a Hermite series, ordered from lowest degree to highest, to an array of the coefficients of the equivalent polynomial (relative to the \"standard\" basis) ordered from lowest to highest degree. Parameters ---------- c : array_like 1-D array containing the Hermite series coefficients, ordered from lowest order term to highest. Returns ------- pol : ndarray 1-D array containing the coefficients of the equivalent polynomial (relative to the \"standard\" basis) ordered from lowest order term to highest. See Also -------- poly2herme Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. 
Examples -------- >>> from numpy.polynomial.hermite_e import herme2poly >>> herme2poly([ 2., 10., 2., 3.]) array([0., 1., 2., 3.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", + "ast_data": "FunctionDef name:herme2poly arg:c arguments arg Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Assign For Call Assign Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "main", + "source_code": "def main(wheel_dirname):\n if not op.exists(VCOMP140_SRC_PATH):\n raise ValueError(f'Could not find {VCOMP140_SRC_PATH}.')\n if not op.exists(MSVCP140_SRC_PATH):\n raise ValueError(f'Could not find {MSVCP140_SRC_PATH}.')\n if not op.isdir(wheel_dirname):\n raise RuntimeError(f'Could not find {wheel_dirname} file.')\n vcomp140_dll_filename = op.basename(VCOMP140_SRC_PATH)\n msvcp140_dll_filename = op.basename(MSVCP140_SRC_PATH)\n target_folder = op.join(wheel_dirname, TARGET_FOLDER)\n distributor_init = op.join(wheel_dirname, DISTRIBUTOR_INIT)\n if not op.exists(target_folder):\n os.mkdir(target_folder)\n print(f'Copying {VCOMP140_SRC_PATH} to {target_folder}.')\n shutil.copy2(VCOMP140_SRC_PATH, target_folder)\n print(f'Copying {MSVCP140_SRC_PATH} to {target_folder}.')\n shutil.copy2(MSVCP140_SRC_PATH, target_folder)\n print(\"Generating the '_distributor_init.py' file.\")\n make_distributor_init_64_bits(distributor_init, vcomp140_dll_filename, msvcp140_dll_filename)", + "docstring": "Embed vcomp140.dll and msvcp140.dll.", + "type": "function", + "file_path": "scikit-learn\\build_tools\\github\\vendor.py", + "ast_data": "FunctionDef name:main arg:wheel_dirname arguments arg If Call Raise Call If Call Raise Call If Call Raise Call Assign Call Assign Call Assign Call Assign Call If Call Call Call Call Call Call Call Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "@torch.no_grad()\ndef forward(self, images: Tensor, batched_prompts: list[dict[str, Any]], multimask_output: bool) -> list[SegmentationResults]:\n KORNIA_CHECK_SHAPE(images, ['B', '3', 'H', 'W'])\n KORNIA_CHECK(images.shape[0] == len(batched_prompts), 'The number of images (`B`) should match with the length of prompts!')\n image_embeddings = self.image_encoder(images)\n outputs = []\n for prompt_record, curr_embedding in zip(batched_prompts, image_embeddings):\n sparse_embeddings, dense_embeddings = self.prompt_encoder(points=prompt_record.get('points', None), boxes=prompt_record.get('boxes', None), masks=prompt_record.get('mask_inputs', None))\n low_res_logits, iou_predictions = self.mask_decoder(image_embeddings=curr_embedding[None, ...], image_pe=self.prompt_encoder.get_dense_pe(), sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output)\n outputs.append(SegmentationResults(low_res_logits, iou_predictions, self.mask_threshold))\n return outputs", + "docstring": "Predicts masks end-to-end from provided images and prompts. This method expects that the images have already been pre-processed, at least been normalized, resized and padded to be compatible with the . .. note:: For each image :math:, it is possible to input a batch (:math:) of :math: prompts, the results are batched by the number of prompts batch. So given a prompt with :math:, and :math:, the results will look like :math: where :math: is determined by multimask_output. 
And within each of these masks :math:, it should be possible to find :math: instances if the model succeed. Args: images: The image as a torch tensor in :math: format, already transformed for input to the model. batched_prompts: A list over the batch of images (list length should be :math:), each a dictionary with the following keys. If it does not have the respective prompt, it should not be included in this dictionary. The options are: - \"points\": tuple of (Tensor, Tensor) within the coordinate keypoints and their respective labels. the tuple should look like (keypoints, labels), where: - The keypoints (a tensor) are a batched point prompts for this image, with shape :math:. Already transformed to the input frame of the model. - The labels (a tensor) are a batched labels for point prompts, with shape :math:. Where 1 indicates a foreground point and 0 indicates a background point. - \"boxes\": (Tensor) Batched box inputs, with shape :math:. Already transformed to the input frame of the model. - \"mask_inputs\": (Tensor) Batched mask inputs to the model, in the form :math:. multimask_output: Whether the model should predict multiple disambiguating masks, or return a single mask. Returns: A list over input images, where each element is as SegmentationResults the following. - logits: Low resolution logits with shape :math:. Can be passed as mask input to subsequent iterations of prediction. Where :math: is the number of input prompts, :math: is determined by multimask_output, and :math: are the model output size. - scores: The model's predictions of mask quality (iou prediction), in shape BxC.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\sam\\model.py", + "ast_data": "FunctionDef name:forward arg:self arg:images arg:batched_prompts arg:multimask_output arguments arg arg arg arg Call Call Compare Call Assign Call Assign For Call Assign Call Call Call Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_prepare", + "source_code": "def _prepare(self):\n pass", + "docstring": "Create all needed tensors before applying gradients. 
This is called with the name_scope using the \"name\" that users have chosen for the application of gradients.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_prepare arg:self arguments arg" + }, + { + "library": "pandas", + "name": "between", + "source_code": "def between(self, left, right, inclusive: Literal['both', 'neither', 'left', 'right']='both') -> Series:\n if inclusive == 'both':\n lmask = self >= left\n rmask = self <= right\n elif inclusive == 'left':\n lmask = self >= left\n rmask = self < right\n elif inclusive == 'right':\n lmask = self > left\n rmask = self <= right\n elif inclusive == 'neither':\n lmask = self > left\n rmask = self < right\n else:\n raise ValueError(\"Inclusive has to be either string of 'both','left', 'right', or 'neither'.\")\n return lmask & rmask", + "docstring": "Return boolean Series equivalent to left >> s = pd.Series([2, 0, 4, 8, np.nan]) Boundary values are included by default: >>> s.between(1, 4) 0 True 1 False 2 True 3 False 4 False dtype: bool With set to `leftright` can be any scalar value: >>> s = pd.Series([\"Alice\", \"Bob\", \"Carol\", \"Eve\"]) >>> s.between(\"Anna\", \"Daniel\") 0 False 1 True 2 True 3 False dtype: bool", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:between arg:self arg:left arg:right arg:inclusive arguments arg arg arg arg If Compare Assign Compare Assign Compare If Compare Assign Compare Assign Compare If Compare Assign Compare Assign Compare If Compare Assign Compare Assign Compare Raise Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "check_symmetric", + "source_code": "def check_symmetric(array, *, tol=1e-10, raise_warning=True, raise_exception=False):\n if array.ndim != 2 or array.shape[0] != array.shape[1]:\n raise ValueError('array must be 2-dimensional and square. shape = {0}'.format(array.shape))\n if sp.issparse(array):\n diff = array - array.T\n if diff.format not in ['csr', 'csc', 'coo']:\n diff = diff.tocsr()\n symmetric = np.all(abs(diff.data) < tol)\n else:\n symmetric = np.allclose(array, array.T, atol=tol)\n if not symmetric:\n if raise_exception:\n raise ValueError('Array must be symmetric')\n if raise_warning:\n warnings.warn('Array is not symmetric, and will be converted to symmetric by average with its transpose.', stacklevel=2)\n if sp.issparse(array):\n conversion = 'to' + array.format\n array = getattr(0.5 * (array + array.T), conversion)()\n else:\n array = 0.5 * (array + array.T)\n return array", + "docstring": "Make sure that array is 2D, square and symmetric. If the array is not symmetric, then a symmetrized version is returned. Optionally, a warning or exception is raised if the matrix is not symmetric. Parameters ---------- array : {ndarray, sparse matrix} Input object to check / convert. Must be two-dimensional and square, otherwise a ValueError will be raised. tol : float, default=1e-10 Absolute tolerance for equivalence of arrays. Default = 1E-10. raise_warning : bool, default=True If True then raise a warning if conversion is required. raise_exception : bool, default=False If True then raise an exception if array is not symmetric. Returns ------- array_sym : {ndarray, sparse matrix} Symmetrized version of the input array, i.e. the average of array and array.transpose(). If sparse, then duplicate entries are first summed and zeros are eliminated. 
Examples -------- >>> import numpy as np >>> from sklearn.utils.validation import check_symmetric >>> symmetric_array = np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]) >>> check_symmetric(symmetric_array) array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]) >>> from scipy.sparse import csr_matrix >>> sparse_symmetric_array = csr_matrix(symmetric_array) >>> check_symmetric(sparse_symmetric_array)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\validation.py", + "ast_data": "FunctionDef name:check_symmetric arg:array arguments arg arg arg arg If BoolOp Compare Compare Raise Call Call If Call Assign If Compare Assign Call Assign Call Compare Call Assign Call If If Raise Call If Call If Call Assign Assign Call Call Assign Return return:yes" + }, + { + "library": "authlib", + "name": "get_auth_time", + "source_code": "def get_auth_time(self, user) -> Optional[int]:\n return None", + "docstring": "User authentication time. Time when the End-User authentication occurred. Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z as measured in UTC until the date/time. Developers MAY re-implement this method:: def get_auth_time(self, user): return datetime.timestamp(user.get_auth_time())", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token.py", + "ast_data": "FunctionDef name:get_auth_time arg:self arg:user arguments arg arg Return return:no" + }, + { + "library": "pytorch", + "name": "get_loop_body_lowp_fp", + "source_code": "def get_loop_body_lowp_fp(_body: LoopBody) -> tuple[Optional[torch.dtype], bool]:\n sub_blocks = [_body.root_block] + list(_body.subblocks.values())\n _lowp_fp_type: Optional[torch.dtype] = None\n _use_fp32 = False\n for sub_block in sub_blocks:\n for _node in sub_block.graph.nodes:\n if _node.op == 'placeholder' or _node.target in ('get_index', 'index_expr'):\n continue\n if _node.target not in ['load', 'store', 'abs', 'neg', 'output']:\n _use_fp32 = True\n if hasattr(_node, 'meta') and _node.meta:\n assert OptimizationContext.key in _node.meta\n opt_ctx: OptimizationContext = _node.meta[OptimizationContext.key]\n if not opt_ctx.dtype or opt_ctx.dtype not in DTYPE_LOWP_FP:\n _use_fp32 = True\n elif _lowp_fp_type is not None:\n if _lowp_fp_type != opt_ctx.dtype:\n warnings.warn('bf16 and fp16 are mixed in the scheduler node.')\n else:\n _lowp_fp_type = opt_ctx.dtype\n else:\n _use_fp32 = True\n return (_lowp_fp_type, _use_fp32)", + "docstring": "Returns the low precision data type (torch.float16/torch.bfloat16) contained in the nodes and if all the nodes can codegen with this data type without converting to float. 
Otherwise returns None and True.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py", + "ast_data": "FunctionDef name:get_loop_body_lowp_fp arg:_body arguments arg Assign Call Call Assign For For If BoolOp Compare Compare If Compare Assign If BoolOp Call Compare If BoolOp Compare Assign If Compare If Compare Call Assign Assign Return return:yes" + }, + { + "library": "scipy", + "name": "fftconvolve", + "source_code": "def fftconvolve(in1, in2, mode='full', axes=None):\n xp = array_namespace(in1, in2)\n in1 = xp.asarray(in1)\n in2 = xp.asarray(in2)\n if in1.ndim == in2.ndim == 0:\n return in1 * in2\n elif in1.ndim != in2.ndim:\n raise ValueError('in1 and in2 should have the same dimensionality')\n elif xp_size(in1) == 0 or xp_size(in2) == 0:\n return xp.asarray([])\n in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False)\n s1 = in1.shape\n s2 = in2.shape\n shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)]\n ret = _freq_domain_conv(xp, in1, in2, axes, shape, calc_fast_len=True)\n return _apply_conv_mode(ret, s1, s2, mode, axes, xp=xp)", + "docstring": "Convolve two N-dimensional arrays using FFT. Convolve and using the fast Fourier transform method, with the output size determined by the argument. This is generally much faster than for large arrays (n > ~500), but can be slower when only a few output values are needed, and can only output float arrays (int or object array inputs will be cast to float). As of v0.19, automatically chooses this method or the direct method based on an estimation of which is faster. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as . mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: `in1in2in1in1in2convolve2d` function allows for other types of image boundaries, but is far slower. >>> from scipy import datasets >>> face = datasets.face(gray=True) >>> kernel = np.outer(signal.windows.gaussian(70, 8), ... signal.windows.gaussian(70, 8)) >>> blurred = signal.fftconvolve(face, kernel, mode='same') >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1, ... figsize=(6, 15)) >>> ax_orig.imshow(face, cmap='gray') >>> ax_orig.set_title('Original') >>> ax_orig.set_axis_off() >>> ax_kernel.imshow(kernel, cmap='gray') >>> ax_kernel.set_title('Gaussian kernel') >>> ax_kernel.set_axis_off() >>> ax_blurred.imshow(blurred, cmap='gray') >>> ax_blurred.set_title('Blurred') >>> ax_blurred.set_axis_off() >>> fig.show()", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_signaltools.py", + "ast_data": "FunctionDef name:fftconvolve arg:in1 arg:in2 arg:mode arg:axes arguments arg arg arg arg Assign Call Assign Call Assign Call If Compare Return return:yes If Compare Raise Call If BoolOp Compare Call Compare Call Return return:yes Call Assign Call Assign Assign Assign Compare Call Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_m_step", + "source_code": "@abstractmethod\ndef _m_step(self, X, log_resp):\n pass", + "docstring": "M step. 
Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array-like of shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", + "ast_data": "FunctionDef name:_m_step arg:self arg:X arg:log_resp arguments arg arg arg" + }, + { + "library": "pandas", + "name": "_convert_expression", + "source_code": "def _convert_expression(expr) -> str:\n s = pprint_thing(expr)\n _check_expression(s)\n return s", + "docstring": "Convert an object to an expression. This function converts an object to an expression (a unicode string) and checks to make sure it isn't empty after conversion. This is used to convert operators to their string representation for recursive calls to :func:. Parameters ---------- expr : object The object to be converted to a string. Returns ------- str The string representation of an object. Raises ------ ValueError * If the expression is empty.", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\eval.py", + "ast_data": "FunctionDef name:_convert_expression arg:expr arguments arg Assign Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "set_vary_header", + "source_code": "def set_vary_header(response, header_name):\n varies = response.headers.get('Vary', '')\n varies = [x.strip() for x in varies.split(',') if x.strip()]\n if header_name not in varies:\n varies.append(header_name)\n response.headers['Vary'] = ', '.join(varies)", + "docstring": "Add a Vary header to a response.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\__init__.py", + "ast_data": "FunctionDef name:set_vary_header arg:response arg:header_name arguments arg arg Assign Call Assign Call Call Call If Compare Call Assign Call" + }, + { + "library": "matplotlib", + "name": "_norm_angle", + "source_code": "def _norm_angle(a):\n a = (a + 360) % 360\n if a > 180:\n a = a - 360\n return a", + "docstring": "Return the given angle normalized to -180 < *a* <= 180 degrees.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:_norm_angle arg:a arguments arg Assign If Compare Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "SphinxLogRecordTranslator", + "source_code": "class SphinxLogRecordTranslator(logging.Filter):\n LogRecordClass: type[logging.LogRecord]\n\n def __init__(self, app: Sphinx) -> None:\n self.app = app\n super().__init__()\n\n def filter(self, record: SphinxWarningLogRecord) -> bool:\n if isinstance(record, logging.LogRecord):\n record.__class__ = self.LogRecordClass\n location = getattr(record, 'location', None)\n if isinstance(location, tuple):\n docname, lineno = location\n if docname:\n if lineno:\n record.location = f'{self.app.env.doc2path(docname)}:{lineno}'\n else:\n record.location = f'{self.app.env.doc2path(docname)}'\n else:\n record.location = None\n elif isinstance(location, nodes.Node):\n record.location = get_node_location(location)\n elif location and ':' not in location:\n record.location = f'{self.app.env.doc2path(location)}'\n return True", + "docstring": "Converts a log record to one Sphinx expects * Make a instance of SphinxLogRecord * docname to path if location given * append warning type/subtype to message if :confval: is ``", + "type": "class", + "file_path": "sphinx\\sphinx\\util\\logging.py", + "ast_data": "ClassDef name:SphinxLogRecordTranslator FunctionDef 
name:__init__ arg:self arg:app arguments arg arg Assign Call Call FunctionDef name:filter arg:self arg:record arguments arg arg If Call Assign Assign Call If Call Assign If If Assign Call Assign Call Assign If Call Assign Call If BoolOp Compare Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "chi2", + "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'y': ['array-like']}, prefer_skip_nested_validation=True)\ndef chi2(X, y):\n X = check_array(X, accept_sparse='csr', dtype=(np.float64, np.float32))\n if np.any((X.data if issparse(X) else X) < 0):\n raise ValueError('Input X must be non-negative.')\n Y = LabelBinarizer(sparse_output=True).fit_transform(y)\n if Y.shape[1] == 1:\n Y = Y.toarray()\n Y = np.append(1 - Y, Y, axis=1)\n observed = safe_sparse_dot(Y.T, X)\n if issparse(observed):\n observed = observed.toarray()\n feature_count = X.sum(axis=0).reshape(1, -1)\n class_prob = Y.mean(axis=0).reshape(1, -1)\n expected = np.dot(class_prob.T, feature_count)\n return _chisquare(observed, expected)", + "docstring": "Compute chi-squared stats between each non-negative feature and class. This score can be used to select the features with the highest values for the test chi-squared statistic from X, which must contain only **non-negative integer feature values** such as booleans or frequencies (e.g., term counts in document classification), relative to the classes. If some of your features are continuous, you need to bin them, for example by using :class:. Recall that the chi-square test measures dependence between stochastic variables, so using this function \"weeds out\" the features that are the most likely to be independent of class and therefore irrelevant for classification. Read more in the :ref:. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Sample vectors. y : array-like of shape (n_samples,) Target vector (class labels). Returns ------- chi2 : ndarray of shape (n_features,) Chi2 statistics for each feature. p_values : ndarray of shape (n_features,) P-values for each feature. See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. f_regression : F-value between label/feature for regression tasks. Notes ----- Complexity of this algorithm is O(n_classes * n_features). Examples -------- >>> import numpy as np >>> from sklearn.feature_selection import chi2 >>> X = np.array([[1, 1, 3], ... [0, 1, 5], ... [5, 4, 1], ... [6, 6, 2], ... [1, 4, 0], ... 
[0, 0, 0]]) >>> y = np.array([1, 1, 0, 0, 2, 2]) >>> chi2_stats, p_values = chi2(X, y) >>> chi2_stats array([15.3, 6.5 , 8.9]) >>> p_values array([0.000456, 0.0387, 0.0116 ])", + "type": "function", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_univariate_selection.py", + "ast_data": "FunctionDef name:chi2 arg:X arg:y arguments arg arg Assign Call If Call Compare Call Raise Call Assign Call Call If Compare Assign Call Assign Call Assign Call If Call Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "design_matrix", + "source_code": "@classmethod\ndef design_matrix(cls, xvals, t, k, extrapolate=True):\n xvals = np.asarray(xvals, dtype=float)\n ndim = xvals.shape[-1]\n if len(t) != ndim:\n raise ValueError(f'Data and knots are inconsistent: len(t) = {len(t)} for ndim = {ndim!r}.')\n k, _indices_k1d, (_t, len_t) = _preprocess_inputs(k, t)\n c_shape = tuple((len_t[d] - k[d] - 1 for d in range(ndim)))\n cs = c_shape[1:] + (1,)\n cstrides = np.cumprod(cs[::-1], dtype=np.int64)[::-1].copy()\n data, indices, indptr = _dierckx._coloc_nd(xvals, _t, len_t, k, _indices_k1d, cstrides)\n return csr_array((data, indices, indptr))", + "docstring": "Construct the design matrix as a CSR format sparse array. Parameters ---------- xvals : ndarray, shape(npts, ndim) Data points. `ValueErrorxvals` and contains values of b-spline basis elements which are non-zero at this value.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_ndbspline.py", + "ast_data": "FunctionDef name:design_matrix arg:cls arg:xvals arg:t arg:k arg:extrapolate arguments arg arg arg arg arg Assign Call Assign If Compare Call Raise Call Call Assign Call Assign Call Call Assign Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_threadpool_controller_decorator", + "source_code": "def _threadpool_controller_decorator(limits=1, user_api='blas'):\n\n def decorator(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n controller = _get_threadpool_controller()\n with controller.limit(limits=limits, user_api=user_api):\n return func(*args, **kwargs)\n return wrapper\n return decorator", + "docstring": "Decorator to limit the number of threads used at the function level. 
It should be preferred over because this one only loads the shared libraries when the function is called while the latter loads them at import time.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\parallel.py", + "ast_data": "FunctionDef name:_threadpool_controller_decorator arg:limits arg:user_api arguments arg arg FunctionDef name:decorator arg:func arguments arg FunctionDef name:wrapper arguments arg arg Assign Call With Call Return return:yes Call Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "GetWhileContext", + "source_code": "def GetWhileContext(op):\n ctxt = op._get_control_flow_context()\n if ctxt:\n ctxt = ctxt.GetWhileContext()\n return ctxt", + "docstring": "Get the WhileContext to which this op belongs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py", + "ast_data": "FunctionDef name:GetWhileContext arg:op arguments arg Assign Call If Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "trim_front", + "source_code": "def trim_front(strings: list[str]) -> list[str]:\n if not strings:\n return strings\n smallest_leading_space = min((len(x) - len(x.lstrip()) for x in strings))\n if smallest_leading_space > 0:\n strings = [x[smallest_leading_space:] for x in strings]\n return strings", + "docstring": "Trims leading spaces evenly among all strings. Examples -------- >>> trim_front([\" a\", \" b\"]) ['a', 'b'] >>> trim_front([\" a\", \" \"]) ['a', '']", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:trim_front arg:strings arguments arg If Return return:yes Assign Call Call Call Call If Compare Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_get_empty_indexer", + "source_code": "def _get_empty_indexer() -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n return (np.array([], dtype=np.intp), np.array([], dtype=np.intp))", + "docstring": "Return empty join indexers.", + "type": "function", + "file_path": "pandas\\pandas\\core\\reshape\\merge.py", + "ast_data": "FunctionDef name:_get_empty_indexer arguments Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "add_cell", + "source_code": "def add_cell(self, cell: Cell) -> None:\n while self[self.current_line, self.current_col]:\n self.current_col += 1\n self[self.current_line, self.current_col] = cell\n self.current_col += cell.colspan", + "docstring": "Add a cell to the current line, to use with `` BEFORE inserting it into the table.", + "type": "method", + "file_path": "sphinx\\sphinx\\writers\\text.py", + "ast_data": "FunctionDef name:add_cell arg:self arg:cell arguments arg arg While Assign" + }, + { + "library": "matplotlib", + "name": "set_verts_and_codes", + "source_code": "def set_verts_and_codes(self, verts, codes):\n self.set_verts(verts, closed=False)\n self._codes3d = codes", + "docstring": "Set 3D vertices with path codes.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:set_verts_and_codes arg:self arg:verts arg:codes arguments arg arg arg Call Assign" + }, + { + "library": "pytorch", + "name": "LocalResponseNorm", + "source_code": "class LocalResponseNorm(Module):\n __constants__ = ['size', 'alpha', 'beta', 'k']\n size: int\n alpha: float\n beta: float\n k: float\n\n def __init__(self, size: int, alpha: float=0.0001, beta: float=0.75, k: float=1.0) -> None:\n super().__init__()\n self.size = size\n self.alpha = 
alpha\n self.beta = beta\n self.k = k\n\n def forward(self, input: Tensor) -> Tensor:\n return F.local_response_norm(input, self.size, self.alpha, self.beta, self.k)\n\n def extra_repr(self):\n return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)", + "docstring": "Applies local response normalization over an input signal. The input signal is composed of several input planes, where channels occupy the second dimension. Applies normalization across channels. .. math:: b_{c} = a_{c}\\left(k + \\frac{\\alpha}{n} \\sum_{c'=\\max(0, c-n/2)}^{\\min(N-1,c+n/2)}a_{c'}^2\\right)^{-\\beta} Args: size: amount of neighbouring channels used for normalization alpha: multiplicative factor. Default: 0.0001 beta: exponent. Default: 0.75 k: additive factor. Default: 1 Shape: - Input: :math: - Output: :math: (same shape as input) Examples:: >>> lrn = nn.LocalResponseNorm(2) >>> signal_2d = torch.randn(32, 5, 24, 24) >>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7) >>> output_2d = lrn(signal_2d) >>> output_4d = lrn(signal_4d)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\normalization.py", + "ast_data": "ClassDef name:LocalResponseNorm Assign FunctionDef name:__init__ arg:self arg:size arg:alpha arg:beta arg:k arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_broadcast_array_shapes_remove_axis", + "source_code": "def _broadcast_array_shapes_remove_axis(arrays, axis=None):\n shapes = [arr.shape for arr in arrays]\n return _broadcast_shapes_remove_axis(shapes, axis)", + "docstring": "Broadcast shapes of arrays, dropping specified axes Given a sequence of arrays and an integer or tuple , find the shape of the broadcast result after consuming/dropping . In other words, return output shape of a typical hypothesis test on vectorized along . Examples -------- >>> import numpy as np >>> from scipy.stats._axis_nan_policy import _broadcast_array_shapes_remove_axis >>> a = np.zeros((5, 2, 1)) >>> b = np.zeros((9, 3)) >>> _broadcast_array_shapes_remove_axis((a, b), 1) (5, 3)", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_axis_nan_policy.py", + "ast_data": "FunctionDef name:_broadcast_array_shapes_remove_axis arg:arrays arg:axis arguments arg arg Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='mean_absolute_percentage_error'):\n super().__init__(mean_absolute_percentage_error, name=name, reduction=reduction)", + "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. 
Defaults to 'mean_absolute_percentage_error'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call" + }, + { + "library": "numpy", + "name": "vander", + "source_code": "def vander(x, n=None):\n _vander = np.vander(x, n)\n m = getmask(x)\n if m is not nomask:\n _vander[m] = 0\n return _vander", + "docstring": "Masked values in the input array result in rows of zeros.", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:vander arg:x arg:n arguments arg arg Assign Call Assign Call If Compare Assign Return return:yes" + }, + { + "library": "django", + "name": "get_fields", + "source_code": "def get_fields(self, request, obj=None):\n if self.fields:\n return self.fields\n form = self._get_form_for_get_fields(request, obj)\n return [*form.base_fields, *self.get_readonly_fields(request, obj)]", + "docstring": "Hook for specifying fields.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_fields arg:self arg:request arg:obj arguments arg arg arg If Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "staged_predict", + "source_code": "def staged_predict(self, X):\n for raw_predictions in self._staged_raw_predict(X):\n if raw_predictions.shape[1] == 1:\n encoded_classes = (raw_predictions.ravel() > 0).astype(int)\n else:\n encoded_classes = np.argmax(raw_predictions, axis=1)\n yield self.classes_.take(encoded_classes, axis=0)", + "docstring": "Predict classes at each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Yields ------ y : generator of ndarray of shape (n_samples,) The predicted classes of the input samples, for each iteration.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:staged_predict arg:self arg:X arguments arg arg For Call If Compare Assign Call Compare Call Assign Call Call" + }, + { + "library": "django", + "name": "last_arg_byref", + "source_code": "def last_arg_byref(args):\n return args[-1]._obj.value", + "docstring": "Return the last C argument's value by reference.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\errcheck.py", + "ast_data": "FunctionDef name:last_arg_byref arg:args arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "primapow2", + "source_code": "def primapow2(x):\n return x * x", + "docstring": "Believe it or now, x**2 is not always the same as x*x in Python. In Fortran they appear to be identical. 
Here's a quick one-line to find an example on your system (well, two liner after importing numpy): list(filter(lambda x: x[1], [(x:=np.random.random(), x**2 - x*x != 0) for _ in range(10000)]))", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\linalg.py", + "ast_data": "FunctionDef name:primapow2 arg:x arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "compress_nd", + "source_code": "def compress_nd(x, axis=None):\n x = asarray(x)\n m = getmask(x)\n if axis is None:\n axis = tuple(range(x.ndim))\n else:\n axis = normalize_axis_tuple(axis, x.ndim)\n if m is nomask or not m.any():\n return x._data\n if m.all():\n return nxarray([])\n data = x._data\n for ax in axis:\n axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))\n data = data[(slice(None),) * ax + (~m.any(axis=axes),)]\n return data", + "docstring": "Suppress slices from multiple dimensions which contain masked values. Parameters ---------- x : array_like, MaskedArray The array to operate on. If not a MaskedArray instance (or if no array elements are masked), is interpreted as a MaskedArray with set to . axis : tuple of ints or int, optional Which dimensions to suppress slices from can be configured with this parameter. - If axis is a tuple of ints, those are the axes to suppress slices from. - If axis is an int, then that is the only axis to suppress slices from. - If axis is None, all axis are selected. Returns ------- compress_array : ndarray The compressed array. Examples -------- >>> import numpy as np >>> arr = [[1, 2], [3, 4]] >>> mask = [[0, 1], [0, 0]] >>> x = np.ma.array(arr, mask=mask) >>> np.ma.compress_nd(x, axis=0) array([[3, 4]]) >>> np.ma.compress_nd(x, axis=1) array([[1], [3]]) >>> np.ma.compress_nd(x) array([[3]])", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:compress_nd arg:x arg:axis arguments arg arg Assign Call Assign Call If Compare Assign Call Call Assign Call If BoolOp Compare Call Return return:yes If Call Return return:yes Call Assign For Assign Call Call Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_weakly_lesser_type", + "source_code": "def is_weakly_lesser_type(a: type, b: type) -> bool:\n a, b = (_maybe_get_pytype(a), _maybe_get_pytype(b))\n if a not in _ordered_types or b not in _ordered_types:\n raise RuntimeError(f'Expected builtin numeric types, found {a}, {b}')\n for typ in _ordered_types:\n if a == typ:\n return True\n if b == typ:\n return False\n raise RuntimeError('Unexpected termination!')", + "docstring": "Compares two types, a and b, returning True if a is weakly \"less\" than b. The comparison is determined by the following type ordering: bool, int, float, complex.", + "type": "function", + "file_path": "pytorch\\torch\\_prims_common\\__init__.py", + "ast_data": "FunctionDef name:is_weakly_lesser_type arg:a arg:b arguments arg arg Assign Call Call If BoolOp Compare Compare Raise Call For If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "pandas", + "name": "is_platform_windows", + "source_code": "def is_platform_windows() -> bool:\n return sys.platform in ['win32', 'cygwin']", + "docstring": "Checking if the running platform is windows. 
Returns ------- bool True if the running platform is windows.", + "type": "function", + "file_path": "pandas\\pandas\\compat\\__init__.py", + "ast_data": "FunctionDef name:is_platform_windows arguments Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "select", + "source_code": "def select(self, attributes):\n self._options['select'] = copy.copy(attributes)\n return self", + "docstring": "Select the attributes to display. See for supported attributes. Args: attributes: A list of attribute the profiler node has. Returns: self", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py", + "ast_data": "FunctionDef name:select arg:self arg:attributes arguments arg arg Assign Call Return return:yes" + }, + { + "library": "django", + "name": "deferrable_sql", + "source_code": "def deferrable_sql(self):\n return ''", + "docstring": "Return the SQL to make a constraint \"initially deferred\" during a CREATE TABLE statement.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:deferrable_sql arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_argcheck", + "source_code": "def _argcheck(self, *args):\n cond = 1\n for arg in args:\n cond = logical_and(cond, asarray(arg) > 0)\n return cond", + "docstring": "Default check for correct values on args and keywords. Returns condition array of 1's where arguments are correct and 0's where they are not.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:_argcheck arg:self arguments arg arg Assign For Assign Call Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_shard_dimension", + "source_code": "def set_shard_dimension(self, shard_dimension):\n if self._frozen:\n if self._shard_dimension != shard_dimension:\n raise ValueError(\"Can't set shard dimension to %d since it has been frozen to use %d.\" % (shard_dimension, self._shard_dimension))\n else:\n self._shard_dimension = tensor_shape.as_dimension(shard_dimension)", + "docstring": "Sets the shard dimension for the current policy. If the policy has been frozen then shard_dimension must match the existing setting. Args: shard_dimension: The shard dimension to use in the policy. 
Raises: ValueError: If the policy has been frozen and shard_dimension differs from the frozen value, or shard_dimension can't be interpreted as a Dimension.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py", + "ast_data": "FunctionDef name:set_shard_dimension arg:self arg:shard_dimension arguments arg arg If If Compare Raise Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_compute_size_of_strided_dim", + "source_code": "def _compute_size_of_strided_dim(shrink, spec, size):\n unknown = None\n use_full_range = None\n if shrink:\n return 1\n if size is unknown or size.value is unknown:\n return unknown\n size = size.value\n stride = spec.step\n if stride is not unknown:\n if stride == 0:\n return unknown\n stride = spec.step\n valid_range = [0, size] if stride > 0 else [-1, size - 1]\n\n def canonical(x, c):\n if x is use_full_range:\n return valid_range[c] if stride > 0 else valid_range[c + 1 & 1]\n else:\n x_fwd = size + x if x < 0 else x\n return max(valid_range[0], min(valid_range[1], x_fwd))\n begin = canonical(spec.start, 0)\n end = canonical(spec.stop, 1)\n interval_length = end - begin\n if interval_length == 0 or (interval_length < 0) != (stride < 0):\n return 0\n else:\n remainder = 1 if interval_length % stride != 0 else 0\n return interval_length // stride + remainder\n else:\n return unknown", + "docstring": "Computes the size of a single strided slice dimension.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:_compute_size_of_strided_dim arg:shrink arg:spec arg:size arguments arg arg arg Assign Assign If Return return:yes If BoolOp Compare Compare Return return:yes Assign Assign If Compare If Compare Return return:yes Assign Assign Compare FunctionDef name:canonical arg:x arg:c arguments arg arg If Compare Return return:yes Compare Assign Compare Return return:yes Call Call Assign Call Assign Call Assign If BoolOp Compare Compare Compare Compare Return return:yes Assign Compare Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "_unbox_scalar", + "source_code": "def _unbox_scalar(self, value: DTScalarOrNaT) -> np.int64 | np.datetime64 | np.timedelta64:\n raise AbstractMethodError(self)", + "docstring": "Unbox the integer value of a scalar . Parameters ---------- value : Period, Timestamp, Timedelta, or NaT Depending on subclass. Returns ------- int Examples -------- >>> arr = pd.array(np.array([\"1970-01-01\"], \"datetime64[ns]\")) >>> arr._unbox_scalar(arr[0]) np.datetime64('1970-01-01T00:00:00.000000000')", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_unbox_scalar arg:self arg:value arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "stride", + "source_code": "def stride(self, node: IRNode, index: int, default_value: int=0) -> str:\n if node is None:\n return str(default_value)\n index = _normalize_idx(index, len(node.get_size()))\n if index < 0:\n return str(default_value)\n stride = node.get_stride()[index]\n if V.graph.sizevars.statically_known_leq(stride, 1):\n return str(stride)\n return self.find_symbol(node, 'stride', dim=index) or str(stride)", + "docstring": "Hook called from template code to get the stride of an arg. Generates code which represents stride of a given node at index. If node is None, returns default_value. 
TODO: Will add needed args to pass it in if it is dynamic.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py", + "ast_data": "FunctionDef name:stride arg:self arg:node arg:index arg:default_value arguments arg arg arg arg If Compare Return return:yes Call Assign Call Call Call If Compare Return return:yes Call Assign Call If Call Return return:yes Call Return return:yes BoolOp Call Call" + }, + { + "library": "tensorflow", + "name": "init", + "source_code": "def init(self):\n return self._init_op_fn(self._resource)", + "docstring": "See .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:init arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ReplicationPad1d", + "source_code": "class ReplicationPad1d(_ReplicationPadNd):\n padding: tuple[int, int]\n\n def __init__(self, padding: _size_2_t) -> None:\n super().__init__()\n self.padding = _pair(padding)", + "docstring": "Pads the input tensor using replication of the input boundary. For -dimensional padding, use :func:. Args: padding (int, tuple): the size of the padding. If is , uses the same padding in all boundaries. If a 2-, uses (:math:, :math:) Shape: - Input: :math: or :math:. - Output: :math: or :math:, where :math: Examples:: >>> # xdoctest: +IGNORE_WANT(\"not sure why xdoctest is choking on this\") >>> m = nn.ReplicationPad1d(2) >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4) >>> input tensor([[[0., 1., 2., 3.], [4., 5., 6., 7.]]]) >>> m(input) tensor([[[0., 0., 0., 1., 2., 3., 3., 3.], [4., 4., 4., 5., 6., 7., 7., 7.]]]) >>> # using different paddings for different sides >>> m = nn.ReplicationPad1d((3, 1)) >>> m(input) tensor([[[0., 0., 0., 0., 1., 2., 3., 3.], [4., 4., 4., 4., 5., 6., 7., 7.]]])", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\padding.py", + "ast_data": "ClassDef name:ReplicationPad1d FunctionDef name:__init__ arg:self arg:padding arguments arg arg Call Call Assign Call" + }, + { + "library": "cherrypy", + "name": "index", + "source_code": "@cherrypy.expose\ndef index(self):\n users = ['Remi', 'Carlos', 'Hendrik', 'Lorenzo Lamas']\n yield self.header()\n yield '
\n    List of users:\n\n    '\n for user in users:\n yield ('%s
    ' % user)\n yield self.footer()", + "docstring": "Stream HTTP response body of generator app index URI.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut08_generators_and_yield.py", + "ast_data": "FunctionDef name:index arg:self arguments arg Assign Call For Call" + }, + { + "library": "seaborn", + "name": "_matrix_mask", + "source_code": "def _matrix_mask(data, mask):\n if mask is None:\n mask = np.zeros(data.shape, bool)\n if isinstance(mask, pd.DataFrame):\n if not mask.index.equals(data.index) and mask.columns.equals(data.columns):\n err = 'Mask must have the same index and columns as data.'\n raise ValueError(err)\n elif hasattr(mask, '__array__'):\n mask = np.asarray(mask)\n if mask.shape != data.shape:\n raise ValueError('Mask must have the same shape as data.')\n mask = pd.DataFrame(mask, index=data.index, columns=data.columns, dtype=bool)\n mask = mask | pd.isnull(data)\n return mask", + "docstring": "Ensure that data and mask are compatible and add missing values. Values will be plotted for cells where `` can be an array or a DataFrame.", + "type": "function", + "file_path": "seaborn\\seaborn\\matrix.py", + "ast_data": "FunctionDef name:_matrix_mask arg:data arg:mask arguments arg arg If Compare Assign Call If Call If BoolOp Call Call Assign Raise Call If Call Assign Call If Compare Raise Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_batch_lowrank_logdet", + "source_code": "def _batch_lowrank_logdet(W, D, capacitance_tril):\n return 2 * capacitance_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + D.log().sum(-1)", + "docstring": "Uses \"matrix determinant lemma\":: log|W @ W.T + D| = log|C| + log|D|, where :math: is the capacitance matrix :math:, to compute the log determinant.", + "type": "function", + "file_path": "pytorch\\torch\\distributions\\lowrank_multivariate_normal.py", + "ast_data": "FunctionDef name:_batch_lowrank_logdet arg:W arg:D arg:capacitance_tril arguments arg arg arg Return return:yes Call Call Call Call Call" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self):\n cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop)", + "docstring": "Initialize the statistics gathering tool.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call" + }, + { + "library": "numpy", + "name": "ndim", + "source_code": "def ndim(obj):\n return np.ndim(getdata(obj))", + "docstring": "maskedarray version of the numpy function.", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:ndim arg:obj arguments arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "default_dtypes", + "source_code": "def default_dtypes(self, /, *, device: _Device | None=None) -> DefaultDTypes:\n if device not in ['cpu', _DASK_DEVICE, None]:\n raise ValueError(f'Device not understood. Only \"cpu\" or _DASK_DEVICE is allowed, but received: {device!r}')\n return {'real floating': dtype(float64), 'complex floating': dtype(complex128), 'integral': dtype(intp), 'indexing': dtype(intp)}", + "docstring": "The default data types used for new Dask arrays. For Dask, this always returns the following dictionary: - **\"real floating\"**: `` Parameters ---------- device : str, optional The device to get the default data types for. Returns ------- dtypes : dict A dictionary describing the default data types used for new Dask arrays. 
See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_dtypes() {'real floating': dask.float64, 'complex floating': dask.complex128, 'integral': dask.int64, 'indexing': dask.int64}", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_info.py", + "ast_data": "FunctionDef name:default_dtypes arguments arg arg If Compare Raise Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_ReshapeGrad", + "source_code": "@ops.RegisterGradient('Reshape')\ndef _ReshapeGrad(op: ops.Operation, grad):\n input_shape = op.inputs[0].shape\n if input_shape.rank is not None and (not input_shape.is_fully_defined()):\n input_shape_as_list = input_shape.as_list()\n undefined_dims = []\n has_zero_dim = False\n for i, dim in enumerate(input_shape_as_list):\n if dim is None:\n undefined_dims.append(i)\n elif dim == 0:\n has_zero_dim = True\n if len(undefined_dims) == 1 and (not has_zero_dim):\n input_shape_as_list[undefined_dims[0]] = -1\n return [array_ops.reshape(_IndexedSlicesToTensorNoWarning(grad), input_shape_as_list), None]\n return [array_ops.reshape(_IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0])), None]", + "docstring": "Defines the gradient for .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py", + "ast_data": "FunctionDef name:_ReshapeGrad arg:op arg:grad arguments arg arg Assign If BoolOp Compare Call Assign Call Assign Assign For Call If Compare Call If Compare Assign If BoolOp Compare Call Assign Return return:yes Call Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_with_dependencies", + "source_code": "def _with_dependencies(self, dependencies):\n new_row_splits = control_flow_ops.with_dependencies(dependencies, self._row_splits)\n return RowPartition(row_splits=new_row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)", + "docstring": "Returns a new RowPartition equal to self with control dependencies. Specifically, self._row_splits is gated by the given control dependencies. Used to add sanity checks to the constructors. Args: dependencies: a list of tensors to use as dependencies. 
Returns: A new RowPartition object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:_with_dependencies arg:self arg:dependencies arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_plot", + "source_code": "def _plot(results, metrics, formats, title, x_ticks, x_label, format_markers=('x', '|', 'o', '+'), metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):\n fig = plt.figure('scikit-learn multilabel metrics benchmarks')\n plt.title(title)\n ax = fig.add_subplot(111)\n for i, metric in enumerate(metrics):\n for j, format in enumerate(formats):\n ax.plot(x_ticks, results[i, j].flat, label='{}, {}'.format(metric, format), marker=format_markers[j], color=metric_colors[i % len(metric_colors)])\n ax.set_xlabel(x_label)\n ax.set_ylabel('Time (s)')\n ax.legend()\n plt.show()", + "docstring": "Plot the results by metric, format and some other variable given by x_label", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_multilabel_metrics.py", + "ast_data": "FunctionDef name:_plot arg:results arg:metrics arg:formats arg:title arg:x_ticks arg:x_label arg:format_markers arg:metric_colors arguments arg arg arg arg arg arg arg arg Assign Call Call Assign Call For Call For Call Call Call Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "power", + "source_code": "def power(a, b, third=None):\n if third is not None:\n raise MaskError('3-argument power not supported.')\n ma = getmask(a)\n mb = getmask(b)\n m = mask_or(ma, mb)\n fa = getdata(a)\n fb = getdata(b)\n if isinstance(a, MaskedArray):\n basetype = type(a)\n else:\n basetype = MaskedArray\n with np.errstate(divide='ignore', invalid='ignore'):\n result = np.where(m, fa, umath.power(fa, fb)).view(basetype)\n result._update_from(a)\n invalid = np.logical_not(np.isfinite(result.view(ndarray)))\n if m is not nomask:\n if not result.ndim:\n return masked\n result._mask = np.logical_or(m, invalid)\n if invalid.any():\n if not result.ndim:\n return masked\n elif result._mask is nomask:\n result._mask = invalid\n result._data[invalid] = result.fill_value\n return result", + "docstring": "Returns element-wise base array raised to power from second array. This is the masked array version of . For details see . See Also -------- numpy.power Notes ----- The *out* argument to is not supported, has to be None. 
Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] >>> masked_x = ma.masked_array(x, mask) >>> masked_x masked_array(data=[11.2, -3.973, 0.801, --], mask=[False, False, False, True], fill_value=1e+20) >>> ma.power(masked_x, 2) masked_array(data=[125.43999999999998, 15.784728999999999, 0.6416010000000001, --], mask=[False, False, False, True], fill_value=1e+20) >>> y = [-0.5, 2, 0, 17] >>> masked_y = ma.masked_array(y, mask) >>> masked_y masked_array(data=[-0.5, 2.0, 0.0, --], mask=[False, False, False, True], fill_value=1e+20) >>> ma.power(masked_x, masked_y) masked_array(data=[0.2988071523335984, 15.784728999999999, 1.0, --], mask=[False, False, False, True], fill_value=1e+20)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:power arg:a arg:b arg:third arguments arg arg arg If Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call If Call Assign Call Assign With Call Assign Call Call Call Call Assign Call Call Call If Compare If Return return:yes Assign Call If Call If Return return:yes If Compare Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_variable_op", + "source_code": "def _is_variable_op(op):\n return op in _VARIABLE_OPS", + "docstring": "Returns true if 'op' refers to a Variable node.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py", + "ast_data": "FunctionDef name:_is_variable_op arg:op arguments arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "is_available", + "source_code": "def is_available():\n return torch._C._has_cudnn", + "docstring": "Return a bool indicating if CUDNN is currently available.", + "type": "function", + "file_path": "pytorch\\torch\\backends\\cudnn\\__init__.py", + "ast_data": "FunctionDef name:is_available arguments Return return:yes" + }, + { + "library": "pytorch", + "name": "NSTracer", + "source_code": "class NSTracer(quantize_fx.QuantizationTracer):\n\n def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:\n if isinstance(m, torch.ao.quantization.ObserverBase):\n return True\n elif isinstance(m, torch.ao.quantization.FakeQuantizeBase):\n return True\n return super().is_leaf_module(m, module_qualified_name)", + "docstring": "Just like a regular FX quantization tracer, but treats observers and fake_quantize modules as leaf modules.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py", + "ast_data": "ClassDef name:NSTracer FunctionDef name:is_leaf_module arg:self arg:m arg:module_qualified_name arguments arg arg arg If Call Return return:yes If Call Return return:yes Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_ticks_position", + "source_code": "def get_ticks_position(self):\n return {1: 'bottom', 2: 'top', 'default': 'default', 'unknown': 'unknown'}[self._get_ticks_position()]", + "docstring": "Return the ticks position (\"top\", \"bottom\", \"default\", or \"unknown\").", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_ticks_position arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "elu", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef elu(x, alpha=1.0):\n res = nn.elu(x)\n if alpha == 1:\n return res\n else:\n return array_ops.where_v2(x > 0, res, 
alpha * res)", + "docstring": "Exponential linear unit. Args: x: A tensor or variable to compute the activation function for. alpha: A scalar, slope of negative section. Returns: A tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:elu arg:x arg:alpha arguments arg arg Assign Call If Compare Return return:yes Return return:yes Call Compare" + }, + { + "library": "pytorch", + "name": "drop_removed_buffers", + "source_code": "def drop_removed_buffers(self, lines):\n for i, line in enumerate(lines):\n if isinstance(line, (AllocateLine, FreeIfNotReusedLine, ReuseLine)):\n if line.node.get_name() in V.graph.removed_buffers:\n lines[i] = NullLine(self.wrapper)", + "docstring": "Replace any memory planning lines in V.graph.removed_buffers with NullLine", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py", + "ast_data": "FunctionDef name:drop_removed_buffers arg:self arg:lines arguments arg arg For Call If Call If Compare Call Assign Call" + }, + { + "library": "pandas", + "name": "__getattr__", + "source_code": "@final\ndef __getattr__(self, name: str):\n if name not in self._internal_names_set and name not in self._metadata and (name not in self._accessors) and self._info_axis._can_hold_identifiers_and_holds_name(name):\n return self[name]\n return object.__getattribute__(self, name)", + "docstring": "After regular attribute access, try looking up the name This allows simpler access to columns for interactive use.", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:__getattr__ arg:self arg:name arguments arg arg If BoolOp Compare Compare Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "save_exported_model", + "source_code": "def save_exported_model(self, dst_saved_model_path: str, exported_model_serialized: bytes, src_saved_model_path: str, tags: set[str], serialized_signature_def_map: dict[str, bytes]) -> Optional[bool]:\n exported_model = exported_model_pb2.ExportedModel.FromString(exported_model_serialized)\n signature_def_map = {}\n for key, serialized_signature_def in serialized_signature_def_map.items():\n signature_def_map[key] = meta_graph_pb2.SignatureDef.FromString(serialized_signature_def)\n return _call_and_return_none_on_error(func=functools.partial(_save_model_and_copy_assets, exported_model, src_saved_model_path, dst_saved_model_path, signature_def_map, tags), error_msg=f'Failed to save model \"{dst_saved_model_path}\", signature_def_map: {signature_def_map}, tags: {tags}.')", + "docstring": "Saves to as a SavedModel. Args: dst_saved_model_path: Destination path to save the exported model. exported_model_serialized: Exported model to export as SavedModel. src_saved_model_path: Path to the source SavedModel. This will be used to copy the asset files to . tags: Tags to attach to the saved MetaGraphDef. serialized_signature_def_map: Signature key -> serialized SignatureDef. Returns: upon successful execution. 
when an error is raised internally.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py", + "ast_data": "FunctionDef name:save_exported_model arg:self arg:dst_saved_model_path arg:exported_model_serialized arg:src_saved_model_path arg:tags arg:serialized_signature_def_map arguments arg arg arg arg arg arg Assign Call Assign For Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "join_hook", + "source_code": "def join_hook(self, **kwargs):\n return _ZeROJoinHook(self)", + "docstring": "Return the ZeRO join hook. It enables training on uneven inputs by shadowing the collective communications in the optimizer step. Gradients must be properly set before this hook is called. Arguments: kwargs (dict): a :class: containing any keyword arguments to modify the behavior of the join hook at run time; all :class: instances sharing the same join context manager are forwarded the same value for `` is unused.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py", + "ast_data": "FunctionDef name:join_hook arg:self arguments arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "members_of", + "source_code": "def members_of(obj: Any, *, config: Config) -> Sequence[str]:\n if config.autosummary_ignore_module_all:\n return dir(obj)\n else:\n if (obj___all__ := getall(obj)) is not None:\n return obj___all__\n return dir(obj)", + "docstring": "Get the members of `` setting.", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\autosummary\\generate.py", + "ast_data": "FunctionDef name:members_of arg:obj arguments arg arg If Return return:yes Call If Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_slot_names", + "source_code": "def get_slot_names(self, *args, **kwargs):\n return self._opt.get_slot_names(*args, **kwargs)", + "docstring": "Return a list of the names of slots created by the . This simply wraps the get_slot_names() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). 
Returns: A list of strings.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py", + "ast_data": "FunctionDef name:get_slot_names arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "supports_blit", + "source_code": "@_api.classproperty\ndef supports_blit(cls):\n return hasattr(cls, 'copy_from_bbox') and hasattr(cls, 'restore_region')", + "docstring": "If this Canvas sub-class supports blitting.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:supports_blit arg:cls arguments arg Return return:yes BoolOp Call Call" + }, + { + "library": "pandas", + "name": "_add_timedeltalike_scalar", + "source_code": "def _add_timedeltalike_scalar(self, other):\n if isna(other):\n new_values = np.empty(self.shape, dtype='i8').view(self._ndarray.dtype)\n new_values.fill(iNaT)\n return type(self)._simple_new(new_values, dtype=self.dtype)\n self = cast('DatetimeArray | TimedeltaArray', self)\n other = Timedelta(other)\n self, other = self._ensure_matching_resos(other)\n return self._add_timedeltalike(other)", + "docstring": "Add a delta of a timedeltalike Returns ------- Same type as self", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_add_timedeltalike_scalar arg:self arg:other arguments arg arg If Call Assign Call Call Call Return return:yes Call Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "cast", + "source_code": "@tf_export('cast', 'dtypes.cast')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef cast(x, dtype, name=None):\n base_type = dtypes.as_dtype(dtype).base_dtype\n if (isinstance(x, tensor_lib.Tensor) or _pywrap_utils.IsResourceVariable(x)) and base_type == x.dtype:\n return x\n with ops.name_scope(name, 'Cast', [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n values_cast = cast(x.values, base_type, name=name)\n x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)\n elif isinstance(x, indexed_slices.IndexedSlices):\n values_cast = cast(x.values, base_type, name=name)\n x = indexed_slices.IndexedSlices(values_cast, x.indices, x.dense_shape)\n else:\n x = ops.convert_to_tensor(x, name='x')\n if x.dtype.is_complex and base_type.is_floating:\n logging.warn(f'You are casting an input of type {x.dtype.name} to an incompatible dtype {base_type.name}. This will discard the imaginary part and may not be what you intended.')\n if x.dtype != base_type:\n x = gen_math_ops.cast(x, base_type, name=name)\n return x", + "docstring": "Casts a tensor to a new type. The operation casts (in case of ) or (in case of or ) to . For example: >>> x = tf.constant([1.8, 2.2], dtype=tf.float32) >>> tf.cast(x, tf.int32) Notice has an alias : >>> x = tf.constant([1.8, 2.2], dtype=tf.float32) >>> tf.dtypes.cast(x, tf.int32) The operation supports data types (for and ) of , , , , , , , , , , , , , . In case of casting from complex types (, ) to real types, only the real part of is returned. In case of casting from real types to complex types (, ), the imaginary part of the returned value is set to . The handling of complex types here matches the behavior of numpy. Note casting nan and inf values to integral types has undefined behavior. 
Note this operation can lead to a loss of precision when converting native Python and variables to or tensors, since the input is first converted to the data type and then widened. It is recommended to use instead of for any non-tensor inputs. Args: x: A or or of numeric type. It could be , , , , , , , , , , , , , . dtype: The destination type. The list of supported dtypes is the same as . name: A name for the operation (optional). Returns: A or or with same shape as and same type as . Raises: TypeError: If cannot be cast to the .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:cast arg:x arg:dtype arg:name arguments arg arg arg Assign Call If BoolOp BoolOp Call Call Compare Return return:yes With Call If Call Assign Call Assign Call If Call Assign Call Assign Call Assign Call If BoolOp Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_check_shapes", + "source_code": "def _check_shapes(self):\n uv_shape = array_ops.broadcast_static_shape(self.u.shape, self.v.shape)\n batch_shape = array_ops.broadcast_static_shape(self.base_operator.batch_shape, uv_shape[:-2])\n tensor_shape.Dimension(self.base_operator.domain_dimension).assert_is_compatible_with(uv_shape[-2])\n if self._diag_update is not None:\n tensor_shape.dimension_at_index(uv_shape, -1).assert_is_compatible_with(self._diag_update.shape[-1])\n array_ops.broadcast_static_shape(batch_shape, self._diag_update.shape[:-1])", + "docstring": "Static check that shapes are compatible.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py", + "ast_data": "FunctionDef name:_check_shapes arg:self arguments arg Assign Call Assign Call Call Call If Compare Call Call Call" + }, + { + "library": "cryptography", + "name": "_check_empty", + "source_code": "def _check_empty(data: utils.Buffer) -> None:\n if data:\n raise ValueError('Corrupt data: unparsed data')", + "docstring": "All data should have been parsed.", + "type": "function", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py", + "ast_data": "FunctionDef name:_check_empty arg:data arguments arg If Raise Call" + }, + { + "library": "tensorflow", + "name": "_sanitize_slices", + "source_code": "def _sanitize_slices(slices, intended_shape, deficient_shape):\n sanitized_slices = []\n idx = 0\n for slc in slices:\n if slc is Ellipsis:\n if idx < 0:\n raise ValueError('Found multiple `...` in slices {}'.format(slices))\n num_remaining_non_newaxis_slices = sum((s is not array_ops.newaxis for s in slices[slices.index(Ellipsis) + 1:]))\n idx = -num_remaining_non_newaxis_slices\n elif slc is array_ops.newaxis:\n pass\n else:\n is_broadcast = intended_shape[idx] > deficient_shape[idx]\n if isinstance(slc, slice):\n start, stop, step = (slc.start, slc.stop, slc.step)\n if start is not None:\n start = _prefer_static_where(is_broadcast, 0, start)\n if stop is not None:\n stop = _prefer_static_where(is_broadcast, 1, stop)\n if step is not None:\n step = _prefer_static_where(is_broadcast, 1, step)\n slc = slice(start, stop, step)\n else:\n slc = _prefer_static_where(is_broadcast, 0, slc)\n idx += 1\n sanitized_slices.append(slc)\n return sanitized_slices", + "docstring": "Restricts slices to avoid overflowing size-1 (broadcast) dimensions. Args: slices: iterable of slices received by . intended_shape: int shape for which the slices were intended. 
deficient_shape: int shape to which the slices will be applied. Must have the same rank as . Returns: sanitized_slices: Python of slice objects.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\slicing.py", + "ast_data": "FunctionDef name:_sanitize_slices arg:slices arg:intended_shape arg:deficient_shape arguments arg arg arg Assign Assign For If Compare If Compare Raise Call Call Assign Call Compare Call Assign If Compare Assign Compare If Call Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "unsorted_segment_mean", + "source_code": "@tf_export('math.unsorted_segment_mean', v1=['math.unsorted_segment_mean', 'unsorted_segment_mean'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('unsorted_segment_mean')\ndef unsorted_segment_mean(data, segment_ids, num_segments, name=None):\n with ops.name_scope(name, 'UnsortedSegmentMean'):\n data = ops.convert_to_tensor(data)\n segment_ids = ops.convert_to_tensor(segment_ids)\n N = _unsorted_segment_N(data, segment_ids, num_segments)\n summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)\n return summed / N", + "docstring": "Computes the mean along segments of a tensor. Read [the section on segmentation]( for an explanation of segments. This operator is similar to the operator. Instead of computing the sum over segments, it computes the mean of all entries belonging to a segment such that: \\\\(output_i = 1/N_i \\sum_{j...} data[j...]\\\\) where the sum is over tuples such that with \\\\N_i\\\\ being the number of occurrences of id \\\\i\\\\. If there is no entry for a given segment ID , it outputs 0. If the given segment ID is negative, the value is dropped and will not be added to the sum of the segment. Caution: On CPU, values in are always validated to be less than , and an error is thrown for out-of-bound indices. On GPU, this does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices result in safe but unspecified behavior, which may include ignoring out-of-bound indices or outputting a tensor with a 0 stored in the first dimension of its shape if is 0. Args: data: A with floating point or complex dtype. segment_ids: An integer tensor whose shape is a prefix of . The values must be less than . The values are always validated to be in range on CPU, never validated on GPU. num_segments: An integer scalar . The number of distinct segment IDs. name: A name for the operation (optional). Returns: A . 
Has same shape as data, except for the first dimensions, which are replaced with a single dimension which has size .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:unsorted_segment_mean arg:data arg:segment_ids arg:num_segments arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_getrow", + "source_code": "def _getrow(self, i):\n if self.ndim == 1:\n raise ValueError('getrow not meaningful for a 1d array')\n M = self.shape[0]\n if i < 0:\n i += M\n if i < 0 or i >= M:\n raise IndexError('index out of bounds')\n row_selector = self._csr_container(([1], [[0], [i]]), shape=(1, M), dtype=self.dtype)\n return row_selector @ self", + "docstring": "Returns a copy of row i of the array, as a (1 x n) sparse array (row vector).", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_base.py", + "ast_data": "FunctionDef name:_getrow arg:self arg:i arguments arg arg If Compare Raise Call Assign If Compare If BoolOp Compare Compare Raise Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "detect_platform", + "source_code": "def detect_platform():\n if on_gcp():\n if context.context().list_logical_devices('GPU'):\n return PlatformDevice.GCE_GPU\n elif context.context().list_logical_devices('TPU'):\n return PlatformDevice.GCE_TPU\n else:\n return PlatformDevice.GCE_CPU\n elif context.context().list_logical_devices('GPU'):\n return PlatformDevice.INTERNAL_GPU\n elif context.context().list_logical_devices('TPU'):\n return PlatformDevice.INTERNAL_TPU\n else:\n return PlatformDevice.INTERNAL_CPU", + "docstring": "Returns the platform and device information.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling_util.py", + "ast_data": "FunctionDef name:detect_platform arguments If Call If Call Call Return return:yes If Call Call Return return:yes Return return:yes If Call Call Return return:yes If Call Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "apply", + "source_code": "def apply(self, *args):\n backward_fn = self._forward_cls.backward\n vjp_fn = self._forward_cls.vjp\n if backward_fn is not Function.backward and vjp_fn is not Function.vjp:\n raise RuntimeError(\"Implementing both 'backward' and 'vjp' for a custom Function is not allowed. You should only implement one of them.\")\n user_fn = vjp_fn if vjp_fn is not Function.vjp else backward_fn\n return user_fn(self, *args)", + "docstring": "Apply method used when executing this Node during the backward", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\function.py", + "ast_data": "FunctionDef name:apply arg:self arguments arg arg Assign Assign If BoolOp Compare Compare Raise Call Assign Compare Return return:yes Call" + }, + { + "library": "pytorch", + "name": "save", + "source_code": "def save(self, f, **kwargs):\n return self._c.save(str(f), **kwargs)", + "docstring": "Save with a file-like object. save(f, _extra_files={}) See :func: which accepts a file-like object. This function, torch.save(), converts the object to a string, treating it as a path. 
DO NOT confuse these two functions when it comes to the 'f' parameter functionality.", + "type": "method", + "file_path": "pytorch\\torch\\jit\\_script.py", + "ast_data": "FunctionDef name:save arg:self arg:f arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_process_call", + "source_code": "def _process_call(self, node: ast.Call) -> None:\n func = node.func\n if self._is_export_call(func):\n func = cast(ast.Call, func)\n if len(node.args) != 1 or node.keywords:\n raise BadExportError(f'{self._current_file}:{node.lineno} export must be called with a single value: {ast.dump(node)}')\n symbol = self._name(self._unwrap_simple_call(node.args[0]))\n if not symbol:\n raise BadExportError(f'{self._current_file}:{node.lineno} export must be called with a single value: {ast.dump(node)}')\n self._add_exported_symbol(func, symbol)\n elif isinstance(func, ast.Attribute) and func.attr == 'export_constant' and self._is_export_call(func.value):\n if len(node.args) != 2 or node.keywords or self._name(node.args[0]) != '__name__':\n raise BadExportError(f'{self._current_file}:{node.lineno} export_constant must be called with __name__, : {ast.dump(node)}')\n self._add_exported_symbol(func.value, self._literal_value(node.args[1]))\n else:\n self.visit(node)", + "docstring": "Process top-level call for potential symbol export.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py", + "ast_data": "FunctionDef name:_process_call arg:self arg:node arguments arg arg Assign If Call Assign Call If BoolOp Compare Call Raise Call Call Assign Call Call If Raise Call Call Call If BoolOp Call Compare Call If BoolOp Compare Call Compare Call Raise Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_should_cache", + "source_code": "def _should_cache():\n if context.executing_eagerly():\n return False\n graph = ops.get_default_graph()\n ctxt = graph._get_control_flow_context()\n in_v1_while_loop = control_flow_util.GetContainingWhileContext(ctxt) is not None\n in_v2_while_loop = control_flow_util_v2.in_while_loop_defun(graph)\n return not in_v1_while_loop and (not in_v2_while_loop)", + "docstring": "Returns True if a default caching device should be set, otherwise False.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py", + "ast_data": "FunctionDef name:_should_cache arguments If Call Return return:yes Assign Call Assign Call Assign Compare Call Assign Call Return return:yes BoolOp" + }, + { + "library": "pytorch", + "name": "InductorOutput", + "source_code": "class InductorOutput(Generic[TOut], ABC):\n\n @abstractmethod\n def pre_save(self) -> None:\n ...\n\n @abstractmethod\n def load(self, example_inputs) -> TOut:\n ...\n\n @abstractmethod\n def post_compile(self, result: TOut, fx_config: _CompileFxKwargs) -> TOut:\n ...", + "docstring": "Class representing a single inductor output", + "type": "class", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py", + "ast_data": "ClassDef name:InductorOutput FunctionDef name:pre_save arg:self arguments arg FunctionDef name:load arg:self arg:example_inputs arguments arg arg FunctionDef name:post_compile arg:self arg:result arg:fx_config arguments arg arg arg" + }, + { + "library": "sphinx", + "name": "add_generic_role", + "source_code": "def add_generic_role(self, name: str, nodeclass: type[Node], override: bool=False) -> None:\n logger.debug('[app] adding generic role: %r', (name, 
nodeclass))\n if not override and docutils.is_role_registered(name):\n logger.warning(__('role %r is already registered and will not be overridden'), name, type='app', subtype='add_generic_role')\n role = roles.GenericRole(name, nodeclass)\n docutils.register_role(name, role)", + "docstring": "Register a generic Docutils role. Register a Docutils role that does nothing but wrap its contents in the node given by *nodeclass*. :param override: If false, do not install it if another role is already installed as the same name If true, unconditionally install the role. .. versionadded:: 0.6 .. versionchanged:: 1.8 Add *override* keyword.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:add_generic_role arg:self arg:name arg:nodeclass arg:override arguments arg arg arg arg Call If BoolOp Call Call Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "reserve", + "source_code": "def reserve(self, n: int) -> Optional[str]:\n with self._lock:\n for lower_index in range(self._num_cores - n + 1):\n indices = tuple(range(lower_index, lower_index + n))\n if all((self._available[i] for i in indices)):\n for i in indices:\n self._available[i] = False\n lower_core = indices[0] + self._min_core_id\n upper_core = indices[-1] + self._min_core_id\n key = f'{lower_core}-{upper_core}' if n > 1 else f'{lower_core}'\n self._reservations[key] = indices\n return key\n return None", + "docstring": "Simple first-fit policy. If successful, return a string for . Otherwise, return None.", + "type": "method", + "file_path": "pytorch\\benchmarks\\instruction_counts\\execution\\runner.py", + "ast_data": "FunctionDef name:reserve arg:self arg:n arguments arg arg With For Call Assign Call Call If Call For Assign Assign Assign Assign Compare Assign Return return:yes Return return:no" + }, + { + "library": "pandas", + "name": "ravel_compat", + "source_code": "def ravel_compat(meth: F) -> F:\n\n @wraps(meth)\n def method(self, *args, **kwargs):\n if self.ndim == 1:\n return meth(self, *args, **kwargs)\n flags = self._ndarray.flags\n flat = self.ravel('K')\n result = meth(flat, *args, **kwargs)\n order = 'F' if flags.f_contiguous else 'C'\n return result.reshape(self.shape, order=order)\n return cast(F, method)", + "docstring": "Decorator to ravel a 2D array before passing it to a cython operation, then reshape the result to our own shape.", + "type": "function", + "file_path": "pandas\\pandas\\core\\arrays\\_mixins.py", + "ast_data": "FunctionDef name:ravel_compat arg:meth arguments arg FunctionDef name:method arg:self arguments arg arg arg If Compare Return return:yes Call Assign Assign Call Assign Call Assign Return return:yes Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "equalize3d", + "source_code": "@perform_keep_shape_video\ndef equalize3d(input: Tensor) -> Tensor:\n res = []\n for volume in input:\n scaled_input = torch.stack([_scale_channel(volume[i, :, :, :]) for i in range(len(volume))])\n res.append(scaled_input)\n return torch.stack(res)", + "docstring": "Equalize the values for a 3D volumetric tensor. Implements Equalize function for a sequence of images using PyTorch ops based on uint8 format: Args: input: image tensor with shape :math: to equalize. 
Returns: Equalized volume with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\adjust.py", + "ast_data": "FunctionDef name:equalize3d arg:input arguments arg Assign For Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "MultiKernelState", + "source_code": "class MultiKernelState:\n\n def __init__(self):\n self.subkernel_to_kernel_name = {}\n self.kernel_defs = IndentedBuffer()\n\n def define_kernel(self, kernels):\n kernel_names = tuple((k.kernel_name for k in kernels))\n if kernel_names in self.subkernel_to_kernel_name:\n return self.subkernel_to_kernel_name[kernel_names]\n multi_kernel_name = f'multi_kernel_{len(self.subkernel_to_kernel_name)}'\n self.subkernel_to_kernel_name[kernel_names] = multi_kernel_name\n if V.graph.cpp_wrapper and (not config.triton.autotune_at_compile_time):\n return multi_kernel_name\n buf = self.kernel_defs\n buf.writeline('')\n buf.writeline(f'{multi_kernel_name} = async_compile.multi_kernel({multi_kernel_name!r}, [')\n with buf.indent():\n for name in kernel_names:\n buf.writeline(f'{name},')\n buf.writeline('])')\n if config.triton.autotune_at_compile_time:\n V.graph.wrapper_code.src_to_kernel['\\n'.join(kernel_names)] = multi_kernel_name\n return multi_kernel_name", + "docstring": "Maintain state of multi-kernel compilation so we don't define duplicated multi-kernel for the same set of sub-kernels. V.graph.wrapper_code has a reference to MultiKernelState instance.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py", + "ast_data": "ClassDef name:MultiKernelState FunctionDef name:__init__ arg:self arguments arg Assign Assign Call FunctionDef name:define_kernel arg:self arg:kernels arguments arg arg Assign Call If Compare Return return:yes Assign Call Assign If BoolOp Return return:yes Assign Call Call With Call For Call Call If Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "_load_base", + "source_code": "def _load_base(self) -> list[RGBColor]:\n return {'autumn': cm_data.get_autumn_base, 'bone': cm_data.get_bone_base, 'jet': cm_data.get_jet_base, 'winter': cm_data.get_winter_base, 'rainbow': cm_data.get_rainbow_base, 'ocean': cm_data.get_ocean_base, 'summer': cm_data.get_summer_base, 'spring': cm_data.get_spring_base, 'cool': cm_data.get_cool_base, 'hsv': cm_data.get_hsv_base, 'brg': cm_data.get_bgr_base, 'pink': cm_data.get_pink_base, 'hot': cm_data.get_hot_base, 'plasma': cm_data.get_plasma_base, 'viridis': cm_data.get_viridis_base, 'cividis': cm_data.get_cividis_base, 'twilight': cm_data.get_twilight_base, 'turbo': cm_data.get_turbo_base, 'seismic': cm_data.get_seismic_base}[self.name]()", + "docstring": "Load the base colormap corresponding to the enumeration member. 
Returns: The base colormap.", + "type": "method", + "file_path": "kornia\\kornia\\color\\colormap.py", + "ast_data": "FunctionDef name:_load_base arg:self arguments arg Return return:yes Call" + }, + { + "library": "virtualenv", + "name": "session_via_cli", + "source_code": "def session_via_cli(args, options=None, setup_logging=True, env=None):\n env = os.environ if env is None else env\n parser, elements = build_parser(args, options, setup_logging, env)\n options = parser.parse_args(args)\n options.py_version = parser._interpreter.version_info\n creator, seeder, activators = tuple((e.create(options) for e in elements))\n return Session(options.verbosity, options.app_data, parser._interpreter, creator, seeder, activators)", + "docstring": "Create a virtualenv session (same as cli_run, but this does not perform the creation). Use this if you just want to query what the virtual environment would look like, but not actually create it. :param args: the command line arguments :param options: passing in a `` to use handlers already registered :param env: environment variables to use :return: the session object of the creation (its structure for now is experimental and might change on short notice)", + "type": "function", + "file_path": "virtualenv\\src\\virtualenv\\run\\__init__.py", + "ast_data": "FunctionDef name:session_via_cli arg:args arg:options arg:setup_logging arg:env arguments arg arg arg arg Assign Compare Assign Call Assign Call Assign Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "f", + "source_code": "def f(x):\n cast_types = (tensor.Tensor, sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor)\n if isinstance(x, cast_types) and x.dtype.is_floating and (x.dtype.base_dtype.name != compute_dtype):\n return math_ops.cast(x, compute_dtype)\n elif isinstance(x, tensor.TensorSpec) and x.dtype.is_floating:\n return tensor.TensorSpec(x.shape, compute_dtype, x.name)\n else:\n return x", + "docstring": "Cast a single Tensor or TensorSpec to the compute dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:f arg:x arguments arg Assign If BoolOp Call Compare Return return:yes Call If BoolOp Call Return return:yes Call Return return:yes" + }, + { + "library": "pandas", + "name": "map_array", + "source_code": "def map_array(arr: ArrayLike, mapper, na_action: Literal['ignore'] | None=None) -> np.ndarray | ExtensionArray | Index:\n from pandas import Index\n if na_action not in (None, 'ignore'):\n msg = f\"na_action must either be 'ignore' or None, {na_action} was passed\"\n raise ValueError(msg)\n if is_dict_like(mapper):\n if isinstance(mapper, dict) and hasattr(mapper, '__missing__'):\n dict_with_default = mapper\n mapper = lambda x: dict_with_default[np.nan if isinstance(x, float) and np.isnan(x) else x]\n else:\n from pandas import Series\n if len(mapper) == 0:\n mapper = Series(mapper, dtype=np.float64)\n elif isinstance(mapper, dict):\n mapper = Series(mapper.values(), index=Index(mapper.keys(), tupleize_cols=False))\n else:\n mapper = Series(mapper)\n if isinstance(mapper, ABCSeries):\n if na_action == 'ignore':\n mapper = mapper[mapper.index.notna()]\n indexer = mapper.index.get_indexer(arr)\n new_values = take_nd(mapper._values, indexer)\n return new_values\n if not len(arr):\n return arr.copy()\n values = arr.astype(object, copy=False)\n if na_action is None:\n return lib.map_infer(values, mapper)\n else:\n return lib.map_infer_mask(values, mapper, 
mask=isna(values).view(np.uint8))", + "docstring": "Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[ndarray, Index, ExtensionArray] The output of the mapping function applied to the array. If the function returns a tuple with more than one element a MultiIndex will be returned.", + "type": "function", + "file_path": "pandas\\pandas\\core\\algorithms.py", + "ast_data": "FunctionDef name:map_array arg:arr arg:mapper arg:na_action arguments arg arg arg If Compare Assign Raise Call If Call If BoolOp Call Call Assign Assign arguments arg BoolOp Call Call If Compare Call Assign Call If Call Assign Call Call Call Call Assign Call If Call If Compare Assign Call Assign Call Assign Call Return return:yes If Call Return return:yes Call Assign Call If Compare Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "ProcessUnusedLoopExits", + "source_code": "def ProcessUnusedLoopExits(self, pending_count, to_ops_set):\n loop_exits = []\n for grad_state in self._map.values():\n for y in grad_state.forward_loop_exits:\n if pending_count[y.op] == 0:\n grad_state.pending_exits_count -= 1\n if y.op not in to_ops_set:\n grad_state.unused_exits.append(y)\n if grad_state.pending_exits_count == 0:\n loop_exits.extend(grad_state.unused_exits)\n for y in grad_state.forward_context.loop_enters:\n if pending_count[y.op] == 0:\n pending_count[y.op] = 1\n return loop_exits", + "docstring": "Process all the \"unused\" loop exits. The \"unused\" exits of the loops are added to . An exit is unused if its pending_count is 0. If there is an exit with real gradient, all these deferred exits will enter the backprop loop with zero gradient. Otherwise, they will enter the backprop loop with None. As an example, people often write: The exit node for x2 is not included by the betweenness analysis. But we need to backprop x2 if x2 is involved in computing v1. Args: pending_count: The number of backprop inputs for every op. to_ops_set: The set of ops for ys in gradients(ys, xs) Returns: The set of unused loop exits that we know at this point we need to backprop.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", + "ast_data": "FunctionDef name:ProcessUnusedLoopExits arg:self arg:pending_count arg:to_ops_set arguments arg arg arg Assign For Call For If Compare If Compare Call If Compare Call For If Compare Assign Return return:yes" + }, + { + "library": "scrapy", + "name": "process_results", + "source_code": "def process_results(self, response: Response, results: Iterable[Any]) -> Iterable[Any]:\n return results", + "docstring": "This overridable method is called for each result (item or request) returned by the spider, and it's intended to perform any last time processing required before returning the results to the framework core, for example setting the item GUIDs. It receives a list of results and the response which originated that results. 
It must return a list of results (items or requests).", + "type": "method", + "file_path": "scrapy\\scrapy\\spiders\\feed.py", + "ast_data": "FunctionDef name:process_results arg:self arg:response arg:results arguments arg arg arg Return return:yes" + }, + { + "library": "django", + "name": "get_default", + "source_code": "def get_default(self):\n return self._get_default()", + "docstring": "Return the default value for this field.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:get_default arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_set_text_position", + "source_code": "def _set_text_position(self, renderer):\n bbox = self.get_window_extent(renderer)\n y = bbox.y0 + bbox.height / 2\n loc = self._text.get_horizontalalignment()\n if loc == 'center':\n x = bbox.x0 + bbox.width / 2\n elif loc == 'left':\n x = bbox.x0 + bbox.width * self.PAD\n else:\n x = bbox.x0 + bbox.width * (1 - self.PAD)\n self._text.set_position((x, y))", + "docstring": "Set text up so it is drawn in the right place.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:_set_text_position arg:self arg:renderer arguments arg arg Assign Call Assign Assign Call If Compare Assign If Compare Assign Assign Call" + }, + { + "library": "django", + "name": "check_consistent_history", + "source_code": "def check_consistent_history(self, connection):\n recorder = MigrationRecorder(connection)\n applied = recorder.applied_migrations()\n for migration in applied:\n if migration not in self.graph.nodes:\n continue\n for parent in self.graph.node_map[migration].parents:\n if parent not in applied:\n if parent in self.replacements:\n if all((m in applied for m in self.replacements[parent].replaces)):\n continue\n raise InconsistentMigrationHistory(\"Migration {}.{} is applied before its dependency {}.{} on database '{}'.\".format(migration[0], migration[1], parent[0], parent[1], connection.alias))", + "docstring": "Raise InconsistentMigrationHistory if any applied migrations have unapplied dependencies.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\loader.py", + "ast_data": "FunctionDef name:check_consistent_history arg:self arg:connection arguments arg arg Assign Call Assign Call For If Compare For If Compare If Compare If Call Compare Raise Call Call" + }, + { + "library": "django", + "name": "execute_from_command_line", + "source_code": "def execute_from_command_line(argv=None):\n utility = ManagementUtility(argv)\n utility.execute()", + "docstring": "Run a ManagementUtility.", + "type": "function", + "file_path": "django\\django\\core\\management\\__init__.py", + "ast_data": "FunctionDef name:execute_from_command_line arg:argv arguments arg Assign Call Call" + }, + { + "library": "tensorflow", + "name": "map_structure_with_tuple_paths_up_to", + "source_code": "def map_structure_with_tuple_paths_up_to(shallow_tree, func, *inputs, **kwargs):\n return nest_util.map_structure_up_to(nest_util.Modality.CORE, shallow_tree, func, *inputs, **kwargs)", + "docstring": "Applies a function or op to a number of partially flattened inputs. Like map_structure_up_to(), except that the 'func' argument takes a path tuple as its first argument, followed by the corresponding values from *inputs. Example: Args: shallow_tree: a shallow structure, common to all the inputs. func: callable that takes args (path, inputs_0_value, ... 
, inputs_N_value), where path is a tuple path to an atom in shallow_tree, and inputs_i_value is the corresponding value from inputs[i]. *inputs: structures that are all structurally compatible with shallow_tree. **kwargs: kwargs to feed to func(). Special kwarg is not passed to func, but instead determines whether the types of iterables within the structures have to be same (e.g. raises a exception). To allow this set this argument to . Raises: TypeError: If is a nested structure but one of is not. TypeError: If the structure types of are different from . ValueError: If the structure lengths of are different from . Returns: Result of repeatedly applying . Has the same structure layout as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py", + "ast_data": "FunctionDef name:map_structure_with_tuple_paths_up_to arg:shallow_tree arg:func arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "inverse", + "source_code": "def inverse(self) -> NamedPose:\n return NamedPose(self._dst_from_src.inverse(), self._frame_dst, self._frame_src)", + "docstring": "Inverse of the NamedPose. Returns: Inverse of the NamedPose. Example: >>> b_from_a = NamedPose(Se3.identity(), frame_src=\"frame_a\", frame_dst=\"frame_b\") >>> b_from_a.inverse() NamedPose(dst_from_src=rotation: Parameter containing: tensor([1., -0., -0., -0.], requires_grad=True) translation: x: 0.0 y: 0.0 z: 0.0, frame_src: frame_b -> frame_dst: frame_a)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\pose.py", + "ast_data": "FunctionDef name:inverse arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "rewrap", + "source_code": "def rewrap(decorator_func, previous_target, new_target):\n cur = decorator_func\n innermost_decorator = None\n target = None\n while _has_tf_decorator_attr(cur):\n innermost_decorator = cur\n target = getattr(cur, '_tf_decorator')\n if target.decorated_target is previous_target:\n break\n cur = target.decorated_target\n assert cur is not None\n if innermost_decorator is None:\n assert decorator_func is previous_target\n return new_target\n target.decorated_target = new_target\n if inspect.ismethod(innermost_decorator):\n if hasattr(innermost_decorator, '__func__'):\n innermost_decorator.__func__.__wrapped__ = new_target\n elif hasattr(innermost_decorator, 'im_func'):\n innermost_decorator.im_func.__wrapped__ = new_target\n else:\n innermost_decorator.__wrapped__ = new_target\n else:\n innermost_decorator.__wrapped__ = new_target\n return decorator_func", + "docstring": "Injects a new target into a function built by make_decorator. This function allows replacing a function wrapped by , assuming the decorator that wraps the function is written as described below. The decorator function must use instead of the wrapped function that is normally used: Example: # Instead of this: def simple_parametrized_wrapper(*args, **kwds): return wrapped_fn(*args, **kwds) tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) # Write this: def simple_parametrized_wrapper(*args, **kwds): return simple_parametrized_wrapper.__wrapped__(*args, **kwds) tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) Note that this process modifies decorator_func. Args: decorator_func: Callable returned by . previous_target: Callable that needs to be replaced. new_target: Callable to replace previous_target with. Returns: The updated decorator. 
If decorator_func is not a tf_decorator, new_target is returned.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_decorator.py", + "ast_data": "FunctionDef name:rewrap arg:decorator_func arg:previous_target arg:new_target arguments arg arg arg Assign Assign Assign While Call Assign Assign Call If Compare Assign Compare If Compare Compare Return return:yes Assign If Call If Call Assign If Call Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "dropout3d", + "source_code": "def dropout3d(input: Tensor, p: float=0.5, training: bool=True, inplace: bool=False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(dropout3d, (input,), input, p=p, training=training, inplace=inplace)\n if p < 0.0 or p > 1.0:\n raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}')\n inp_dim = input.dim()\n if inp_dim not in (4, 5):\n warn_msg = f'dropout3d: Received a {inp_dim}-D input to dropout3d, which is deprecated and will result in an error in a future release. To retain the behavior and silence this warning, please use dropout instead. Note that dropout3d exists to provide channel-wise dropout on inputs with 3 spatial dimensions, a channel dimension, and an optional batch dimension (i.e. 4D or 5D inputs).'\n warnings.warn(warn_msg)\n is_batched = inp_dim == 5\n if not is_batched:\n input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)\n result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)\n if not is_batched:\n result = result.squeeze_(0) if inplace else result.squeeze(0)\n return result", + "docstring": "Randomly zero out entire channels (a channel is a 3D feature map). For example, the :math:-th channel of the :math:-th sample in the batched input is a 3D tensor :math: of the input tensor. Each channel will be zeroed out independently on every forward call with probability :attr: using samples from a Bernoulli distribution. See :class: for details. Args: p: probability of a channel to be zeroed. 
Default: 0.5 training: apply dropout if is ``", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:dropout3d arg:input arg:p arg:training arg:inplace arguments arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Raise Call Assign Call If Compare Assign Call Assign Compare If Assign Call Call Assign Call Call If Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "ensure_decoded", + "source_code": "def ensure_decoded(s) -> str:\n if isinstance(s, (np.bytes_, bytes)):\n s = s.decode(get_option('display.encoding'))\n return s", + "docstring": "If we have bytes, decode them to unicode.", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\common.py", + "ast_data": "FunctionDef name:ensure_decoded arg:s arguments arg If Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "expand_groups_in_paired_modules_list", + "source_code": "def expand_groups_in_paired_modules_list(paired_modules_list):\n new_list = []\n for group in paired_modules_list:\n if len(group) == 1:\n raise ValueError('Group must have at least two modules')\n elif len(group) == 2:\n new_list.append(group)\n elif len(group) > 2:\n new_list.extend(([group[i], group[i + 1]] for i in range(len(group) - 1)))\n return new_list", + "docstring": "Expands module pair groups larger than two into groups of two modules.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py", + "ast_data": "FunctionDef name:expand_groups_in_paired_modules_list arg:paired_modules_list arguments arg Assign For If Compare Call Raise Call If Compare Call Call If Compare Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "batch_stride", + "source_code": "def batch_stride(self, node: IRNode, default_value: int=0) -> str:\n if node is None:\n return str(default_value)\n if len(node.get_size()) < 3:\n return str(default_value)\n batch_stride = node.get_stride()[0]\n if V.graph.sizevars.statically_known_leq(batch_stride, 1):\n return str(batch_stride)\n return '{}*{}'.format(self.find_symbol(node, 'size', dim=1) or node.get_size()[1], self.find_symbol(node, 'size', dim=2) or node.get_size()[2])", + "docstring": "Hook called from template code to get the batch stride of an arg. Returns 0 if batch dim is not present. 
This method assumes that batch stride is the largest stride.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py", + "ast_data": "FunctionDef name:batch_stride arg:self arg:node arg:default_value arguments arg arg arg If Compare Return return:yes Call If Compare Call Call Return return:yes Call Assign Call If Call Return return:yes Call Return return:yes Call BoolOp Call Call BoolOp Call Call" + }, + { + "library": "pytorch", + "name": "_sharded_post_state_dict_hook", + "source_code": "@no_type_check\ndef _sharded_post_state_dict_hook(module: nn.Module, fsdp_state: _FSDPState, state_dict: dict[str, Any], prefix: str) -> dict[str, Any]:\n\n def param_hook(state_dict: dict[str, Any], prefix: str, fqn: str):\n param = state_dict[fqn]\n if not fsdp_state._state_dict_config._use_dtensor:\n sharded_tensor = _ext_chunk_tensor(tensor=param, rank=fsdp_state.rank, world_size=fsdp_state.world_size, num_devices_per_node=fsdp_state._device_handle.device_count(), pg=fsdp_state.process_group, fsdp_extension=fsdp_state._fsdp_extension)\n else:\n sharded_tensor = _ext_chunk_dtensor(tensor=param, rank=fsdp_state.rank, device_mesh=fsdp_state._device_mesh, fsdp_extension=fsdp_state._fsdp_extension)\n if fsdp_state._state_dict_config.offload_to_cpu:\n sharded_tensor = sharded_tensor.cpu()\n state_dict[fqn] = sharded_tensor\n return _common_unshard_post_state_dict_hook(module, fsdp_state, state_dict, prefix, param_hook)", + "docstring": "The hook replaces the unflattened, unsharded parameter in the state_dict with a unflattened, sharded parameter (a ShardedTensor).", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py", + "ast_data": "FunctionDef name:_sharded_post_state_dict_hook arg:module arg:fsdp_state arg:state_dict arg:prefix arguments arg arg arg arg FunctionDef name:param_hook arg:state_dict arg:prefix arg:fqn arguments arg arg arg Assign If Assign Call Call Assign Call If Assign Call Assign Return return:yes Call" + }, + { + "library": "pygame", + "name": "_parse_font_entry_darwin", + "source_code": "def _parse_font_entry_darwin(name, filepath, fonts):\n name = _simplename(name)\n mods = ('regular',)\n for mod in mods:\n if mod in name:\n name = name.replace(mod, '')\n bold = italic = False\n if 'bold' in name:\n name = name.replace('bold', '')\n bold = True\n if 'italic' in name:\n name = name.replace('italic', '')\n italic = True\n _addfont(name, bold, italic, filepath, fonts)", + "docstring": "Parses a font entry for macOS :param name: The filepath without extensions or directories :param filepath: The full path to the font :param fonts: The pygame font dictionary to add the parsed font data to.", + "type": "function", + "file_path": "pygame\\src_py\\sysfont.py", + "ast_data": "FunctionDef name:_parse_font_entry_darwin arg:name arg:filepath arg:fonts arguments arg arg arg Assign Call Assign For If Compare Assign Call Assign If Compare Assign Call Assign If Compare Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "get_output_mask_at", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef get_output_mask_at(self, node_index):\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)", + "docstring": "Retrieves the output mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. 
will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple outputs).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:get_output_mask_at arg:self arg:node_index arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "always_wrap_policy", + "source_code": "def always_wrap_policy(*args, **kwargs) -> bool:\n return True", + "docstring": "A simple recursive wrap policy that always returns `_recursive_wrap`.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py", + "ast_data": "FunctionDef name:always_wrap_policy arguments arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "get_safe_globals", + "source_code": "def get_safe_globals() -> list[Union[Callable, tuple[Callable, str]]]:\n return _weights_only_unpickler._get_safe_globals()", + "docstring": "Returns the list of user-added globals that are safe for `` load.", + "type": "function", + "file_path": "pytorch\\torch\\serialization.py", + "ast_data": "FunctionDef name:get_safe_globals arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "save_op_profiles", + "source_code": "def save_op_profiles(op_profiles: dict[str, set[OpProfile]], f: FileLike) -> None:\n yaml_str = generate_yaml_from_profiles(op_profiles)\n if isinstance(f, (str, os.PathLike)):\n f = os.fspath(f)\n with open(f, 'w') as file:\n file.write(yaml_str)\n elif isinstance(f, io.BytesIO):\n f.write(yaml_str.encode('utf-8'))\n else:\n raise ValueError(f'Invalid type of file {f}')", + "docstring": "Serializes the given operator profiles into a yaml format and saves it to the given file. The operator profile can be loaded back using .", + "type": "function", + "file_path": "pytorch\\torch\\_library\\fake_profile.py", + "ast_data": "FunctionDef name:save_op_profiles arg:op_profiles arg:f arguments arg arg Assign Call If Call Assign Call With Call Call If Call Call Call Raise Call" + }, + { + "library": "pytorch", + "name": "_default_schedule_fn", + "source_code": "def _default_schedule_fn(_: int) -> ProfilerAction:\n return ProfilerAction.RECORD", + "docstring": "Default profiler behavior - immediately starts recording the events, keeps doing it on every profiler step.", + "type": "function", + "file_path": "pytorch\\torch\\profiler\\profiler.py", + "ast_data": "FunctionDef name:_default_schedule_fn arg:_ arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "spatial_soft_argmax2d", + "source_code": "def spatial_soft_argmax2d(input: Tensor, temperature: Optional[Tensor]=None, normalized_coordinates: bool=True) -> Tensor:\n if temperature is None:\n temperature = tensor(1.0)\n input_soft: Tensor = spatial_softmax2d(input, temperature)\n output: Tensor = spatial_expectation2d(input_soft, normalized_coordinates)\n return output", + "docstring": "Compute the Spatial Soft-Argmax 2D of a given input heatmap. Args: input: the given heatmap with shape :math:. temperature: factor to apply to input. normalized_coordinates: whether to return the coordinates normalized in the range of :math:. Otherwise, it will return the coordinates in the range of the input shape. Returns: the index of the maximum 2d coordinates of the give map :math:. The output order is x-coord and y-coord. Examples: >>> input = torch.tensor([[[ ... [0., 0., 0.], ... [0., 10., 0.], ... 
[0., 0., 0.]]]]) >>> spatial_soft_argmax2d(input, normalized_coordinates=False) tensor([[[1.0000, 1.0000]]])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py", + "ast_data": "FunctionDef name:spatial_soft_argmax2d arg:input arg:temperature arg:normalized_coordinates arguments arg arg arg If Compare Assign Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "init", + "source_code": "def init(self, est, begin_at_stage=0):\n header_fields = ['Iter', 'Train Loss']\n verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']\n if est.subsample < 1:\n header_fields.append('OOB Improve')\n verbose_fmt.append('{oob_impr:>16.4f}')\n header_fields.append('Remaining Time')\n verbose_fmt.append('{remaining_time:>16s}')\n print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields))\n self.verbose_fmt = ' '.join(verbose_fmt)\n self.verbose_mod = 1\n self.start_time = time()\n self.begin_at_stage = begin_at_stage", + "docstring": "Initialize reporter Parameters ---------- est : Estimator The estimator begin_at_stage : int, default=0 stage at which to begin reporting", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:init arg:self arg:est arg:begin_at_stage arguments arg arg arg Assign Assign If Compare Call Call Call Call Call Call Call Assign Call Assign Assign Call Assign" + }, + { + "library": "pytorch", + "name": "save", + "source_code": "def save(obj: object, f: FileLike, pickle_module: Any=pickle, pickle_protocol: int=DEFAULT_PROTOCOL, _use_new_zipfile_serialization: bool=True, _disable_byteorder_record: bool=False) -> None:\n torch._C._log_api_usage_once('torch.save')\n _check_dill_version(pickle_module)\n _check_save_filelike(f)\n if isinstance(f, (str, os.PathLike)):\n f = os.fspath(f)\n if _use_new_zipfile_serialization:\n with _open_zipfile_writer(f) as opened_zipfile:\n _save(obj, opened_zipfile, pickle_module, pickle_protocol, _disable_byteorder_record)\n return\n else:\n global _serialization_tls\n if _serialization_tls.skip_data:\n raise RuntimeError('Cannot use skip_data=True with _use_new_zipfile_serialization=False')\n with _open_file_like(f, 'wb') as opened_file:\n _legacy_save(obj, opened_file, pickle_module, pickle_protocol)", + "docstring": "save(obj, f, pickle_module=pickle, pickle_protocol=2, _use_new_zipfile_serialization=True) Saves an object to a disk file. See also: :ref: See :ref: for more advanced tools to manipulate a checkpoint. Args: obj: saved object f: a file-like object (has to implement write and flush) or a string or os.PathLike object containing a file name pickle_module: module used for pickling metadata and objects pickle_protocol: can be specified to override the default protocol .. note:: A common PyTorch convention is to save tensors using .pt file extension. .. note:: PyTorch preserves storage sharing across serialization. See :ref: for more details. .. note:: The 1.6 release of PyTorch switched ``. 
Example: >>> # xdoctest: +SKIP(\"makes cwd dirty\") >>> # Save to file >>> x = torch.tensor([0, 1, 2, 3, 4]) >>> torch.save(x, \"tensor.pt\") >>> # Save to io.BytesIO buffer >>> buffer = io.BytesIO() >>> torch.save(x, buffer)", + "type": "function", + "file_path": "pytorch\\torch\\serialization.py", + "ast_data": "FunctionDef name:save arg:obj arg:f arg:pickle_module arg:pickle_protocol arg:_use_new_zipfile_serialization arg:_disable_byteorder_record arguments arg arg arg arg arg arg Call Call Call If Call Assign Call If With Call Call Return return:no If Raise Call With Call Call" + }, + { + "library": "scikit-learn", + "name": "load_descr", + "source_code": "def load_descr(descr_file_name, *, descr_module=DESCR_MODULE, encoding='utf-8'):\n path = resources.files(descr_module) / descr_file_name\n return path.read_text(encoding=encoding)", + "docstring": "Load from with . Parameters ---------- descr_file_name : str, default=None Name of rst file to be loaded from . For example . See also :func:. If not None, also returns the corresponding description of the dataset. descr_module : str or module, default='sklearn.datasets.descr' Module where lives. See also :func:. The default is . encoding : str, default=\"utf-8\" Name of the encoding that will be decoded with. The default is 'utf-8'. .. versionadded:: 1.4 Returns ------- fdescr : str Content of .", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_base.py", + "ast_data": "FunctionDef name:load_descr arg:descr_file_name arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "has_record", + "source_code": "def has_record(self, name: str) -> bool:\n return name in self.archive_file.get_all_written_records()", + "docstring": "Check if a record exists in the archive.", + "type": "method", + "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py", + "ast_data": "FunctionDef name:has_record arg:self arg:name arguments arg arg Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "publish_traceback", + "source_code": "def publish_traceback(debug_server_urls, graph, feed_dict, fetches, old_graph_version):\n from tensorflow.python.debug.lib import source_remote\n if graph.version > old_graph_version:\n run_key = common.get_run_key(feed_dict, fetches)\n source_remote.send_graph_tracebacks(debug_server_urls, run_key, traceback.extract_stack(), graph, send_source=True)\n return graph.version\n else:\n return old_graph_version", + "docstring": "Publish traceback and source code if graph version is new. is compared with . If the former is higher (i.e., newer), the graph traceback and the associated source code is sent to the debug server at the specified gRPC URLs. Args: debug_server_urls: A single gRPC debug server URL as a or a of debug server URLs. graph: A Python object. feed_dict: Feed dictionary given to the call. fetches: Fetches from the call. old_graph_version: Old graph version to compare to. Returns: If , the new graph version as an . 
Else, the is returned.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\grpc_wrapper.py", + "ast_data": "FunctionDef name:publish_traceback arg:debug_server_urls arg:graph arg:feed_dict arg:fetches arg:old_graph_version arguments arg arg arg arg arg If Compare Assign Call Call Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "truncdiv", + "source_code": "def truncdiv(self, x0: T, x1: T) -> T:\n raise NotImplementedError", + "docstring": "C-style trunc division between integers only. Computes the true division of two numbers and rounds the result to zero.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:truncdiv arg:self arg:x0 arg:x1 arguments arg arg arg Raise" + }, + { + "library": "seaborn", + "name": "__init__", + "source_code": "def __init__(self, plotter, palette=None, order=None, norm=None, saturation=1):\n super().__init__(plotter)\n data = plotter.plot_data.get('hue', pd.Series(dtype=float))\n if isinstance(palette, np.ndarray):\n msg = 'Numpy array is not a supported type for `palette`. Please convert your palette to a list. This will become an error in v0.14'\n warnings.warn(msg, stacklevel=4)\n palette = palette.tolist()\n if data.isna().all():\n if palette is not None:\n msg = 'Ignoring `palette` because no `hue` variable has been assigned.'\n warnings.warn(msg, stacklevel=4)\n else:\n map_type = self.infer_map_type(palette, norm, plotter.input_format, plotter.var_types['hue'])\n if map_type == 'numeric':\n data = pd.to_numeric(data)\n levels, lookup_table, norm, cmap = self.numeric_mapping(data, palette, norm)\n elif map_type == 'categorical':\n cmap = norm = None\n levels, lookup_table = self.categorical_mapping(data, palette, order)\n else:\n cmap = norm = None\n levels, lookup_table = self.categorical_mapping(list(data), palette, order)\n self.saturation = saturation\n self.map_type = map_type\n self.lookup_table = lookup_table\n self.palette = palette\n self.levels = levels\n self.norm = norm\n self.cmap = cmap", + "docstring": "Map the levels of the variable to distinct colors. Parameters ---------- # TODO add generic parameters", + "type": "method", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:plotter arg:palette arg:order arg:norm arg:saturation arguments arg arg arg arg arg arg Call Call Assign Call Call If Call Assign Call Assign Call If Call Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Assign Call Assign Assign Call Call Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "is_ucc_available", + "source_code": "def is_ucc_available() -> bool:\n return _UCC_AVAILABLE", + "docstring": "Check if the UCC backend is available.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:is_ucc_available arguments Return return:yes" + }, + { + "library": "scipy", + "name": "assert_almost_equal", + "source_code": "def assert_almost_equal(actual, desired, decimal=7, *args, **kwds):\n rtol, atol = (0, 1.5 * 10 ** (-decimal))\n return xp_assert_close(actual, desired, *args, atol=atol, rtol=rtol, check_dtype=False, check_shape=False, **kwds)", + "docstring": "Backwards compatible replacement. 
In new code, use xp_assert_close instead.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_array_api_no_0d.py", + "ast_data": "FunctionDef name:assert_almost_equal arg:actual arg:desired arg:decimal arguments arg arg arg arg arg Assign Return return:yes Call" + }, + { + "library": "seaborn", + "name": "InvalidVersion", + "source_code": "class InvalidVersion(ValueError):\n pass", + "docstring": "An invalid version was found, users should refer to PEP 440.", + "type": "class", + "file_path": "seaborn\\seaborn\\external\\version.py", + "ast_data": "ClassDef name:InvalidVersion" + }, + { + "library": "tensorflow", + "name": "avg_pool1d", + "source_code": "@tf_export('nn.avg_pool1d')\n@dispatch.add_dispatch_support\ndef avg_pool1d(input, ksize, strides, padding, data_format='NWC', name=None):\n with ops.name_scope(name, 'AvgPool1D', [input]) as name:\n if data_format is None:\n data_format = 'NWC'\n channel_index = 1 if data_format.startswith('NC') else 2\n ksize = [1] + _get_sequence(ksize, 1, channel_index, 'ksize')\n strides = [1] + _get_sequence(strides, 1, channel_index, 'strides')\n expanding_dim = 1 if data_format == 'NWC' else 2\n data_format = 'NHWC' if data_format == 'NWC' else 'NCHW'\n input = array_ops.expand_dims_v2(input, expanding_dim)\n result = gen_nn_ops.avg_pool(input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)\n return array_ops.squeeze(result, expanding_dim)", + "docstring": "Performs the average pooling on the input. Each entry in is the mean of the corresponding size window in . Note internally this op reshapes and uses the underlying 2d operation. Args: input: A 3-D of the format specified by . ksize: An int or list of that has length or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length or . The stride of the sliding window for each dimension of the input tensor. padding: A string, either or . The padding algorithm. See [here]( for more information. data_format: An optional string from: \"NWC\", \"NCW\". Defaults to \"NWC\". name: A name for the operation (optional). Returns: A of format specified by . The max pooled output tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:avg_pool1d arg:input arg:ksize arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg With Call If Compare Assign Assign Call Assign Call Assign Call Assign Compare Assign Compare Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_scalar_binopt", + "source_code": "def _scalar_binopt(self, other, op):\n self.sum_duplicates()\n res = self._with_data(op(self.data, other), copy=True)\n res.eliminate_zeros()\n return res", + "docstring": "Scalar version of self._binopt, for cases in which no new nonzeros are added. Produces a new sparse array in canonical form.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_compressed.py", + "ast_data": "FunctionDef name:_scalar_binopt arg:self arg:other arg:op arguments arg arg arg Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "delete", + "source_code": "def delete(self):\n if not self._auto_gc_enabled:\n raise TypeError('Persistent tensor %s may have already been deleted.' 
% self.handle)\n self._auto_gc_enabled = False\n holder, deleter = _get_handle_deleter(self._session.graph, 0, self._handle)\n self._session.run(deleter, feed_dict={holder: self.handle})", + "docstring": "Force the deletion of this persistent tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py", + "ast_data": "FunctionDef name:delete arg:self arguments arg If Raise Call Assign Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_initialize_physical_devices", + "source_code": "def _initialize_physical_devices(self, reinitialize=False):\n with self._device_lock:\n if not reinitialize and self._physical_devices is not None:\n return\n devs = pywrap_tfe.TF_ListPhysicalDevices()\n self._physical_devices = [PhysicalDevice(name=d.decode(), device_type=d.decode().split(':')[1]) for d in devs]\n self._physical_device_to_index = {p: i for i, p in enumerate(self._physical_devices)}\n pluggable_devs = pywrap_tfe.TF_ListPluggablePhysicalDevices()\n self._pluggable_devices = [PhysicalDevice(name=d.decode(), device_type=d.decode().split(':')[1]) for d in pluggable_devs]\n self._visible_device_list = list(self._physical_devices)\n self._memory_growth_map = {d: None for d in self._physical_devices if d.device_type == 'GPU' or d in self._pluggable_devices}\n self._import_config()", + "docstring": "Gets local devices visible to the system. Args: reinitialize: If True, reinitializes self._physical_devices so that dynamic registered devices will also be visible to the python front-end.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:_initialize_physical_devices arg:self arg:reinitialize arguments arg arg With If BoolOp Compare Return return:no Assign Call Assign Call Call Call Call Assign Call Assign Call Assign Call Call Call Call Assign Call Assign BoolOp Compare Compare Call" + }, + { + "library": "django", + "name": "ManyToManyRawIdWidget", + "source_code": "class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):\n template_name = 'admin/widgets/many_to_many_raw_id.html'\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n if self.admin_site.is_registered(self.rel.model):\n context['widget']['attrs']['class'] = 'vManyToManyRawIdAdminField'\n return context\n\n def url_parameters(self):\n return self.base_url_parameters()\n\n def label_and_url_for_value(self, value):\n return ('', '')\n\n def value_from_datadict(self, data, files, name):\n value = data.get(name)\n if value:\n return value.split(',')\n\n def format_value(self, value):\n return ','.join((str(v) for v in value)) if value else ''", + "docstring": "A Widget for displaying ManyToMany ids in the \"raw_id\" interface rather than in a box.", + "type": "class", + "file_path": "django\\django\\contrib\\admin\\widgets.py", + "ast_data": "ClassDef name:ManyToManyRawIdWidget Assign FunctionDef name:get_context arg:self arg:name arg:value arg:attrs arguments arg arg arg arg Assign Call Call If Call Assign Return return:yes FunctionDef name:url_parameters arg:self arguments arg Return return:yes Call FunctionDef name:label_and_url_for_value arg:self arg:value arguments arg arg Return return:yes FunctionDef name:value_from_datadict arg:self arg:data arg:files arg:name arguments arg arg arg arg Assign Call If Return return:yes Call FunctionDef name:format_value arg:self arg:value arguments arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": 
"get_source_files", + "source_code": "def get_source_files(source_path: str) -> typing.Generator[str, None, None]:\n for root, dirs, fnames in os.walk(source_path):\n root_rel_path = os.path.relpath(root, source_path)\n for fname in fnames:\n yield os.path.join(root_rel_path, fname)", + "docstring": "Generate the list of files present in the source directory.", + "type": "function", + "file_path": "pandas\\web\\pandas_web.py", + "ast_data": "FunctionDef name:get_source_files arg:source_path arguments arg For Call Assign Call For Call" + }, + { + "library": "pytorch", + "name": "LambdaSL", + "source_code": "class LambdaSL(BaseScheduler):\n\n def __init__(self, sparsifier, sl_lambda, last_epoch=-1, verbose=False):\n self.sparsifier = sparsifier\n if not isinstance(sl_lambda, list) and (not isinstance(sl_lambda, tuple)):\n self.sl_lambdas = [sl_lambda] * len(sparsifier.groups)\n else:\n if len(sl_lambda) != len(sparsifier.groups):\n raise ValueError(f'Expected {len(sparsifier.groups)} lr_lambdas, but got {len(sl_lambda)}')\n self.sl_lambdas = list(sl_lambda)\n super().__init__(sparsifier, last_epoch, verbose)\n\n def get_sl(self):\n if not self._get_sl_called_within_step:\n warnings.warn('To get the last sparsity level computed by the scheduler, please use `get_last_sl()`.')\n return [base_sl * lmbda(self.last_epoch) for lmbda, base_sl in zip(self.sl_lambdas, self.base_sl)]", + "docstring": "Sets the sparsity level of each parameter group to the final sl times a given function. When last_epoch=-1, sets initial sl as zero. Args: sparsifier (BaseSparsifier): Wrapped sparsifier. sl_lambda (function or list): A function which computes a multiplicative factor given an integer parameter epoch, or a list of such functions, one for each group in sparsifier.param_groups. last_epoch (int): The index of last epoch. Default: -1. verbose (bool): If ``. Example: >>> # Assuming sparsifier has two groups. >>> lambda1 = lambda epoch: epoch // 30 >>> lambda2 = lambda epoch: 0.95 ** epoch >>> # xdoctest: +SKIP >>> scheduler = LambdaSL(sparsifier, sl_lambda=[lambda1, lambda2]) >>> for epoch in range(100): >>> train(...) >>> validate(...) 
>>> scheduler.step()", + "type": "class", + "file_path": "pytorch\\torch\\ao\\pruning\\scheduler\\lambda_scheduler.py", + "ast_data": "ClassDef name:LambdaSL FunctionDef name:__init__ arg:self arg:sparsifier arg:sl_lambda arg:last_epoch arg:verbose arguments arg arg arg arg arg Assign If BoolOp Call Call Assign Call If Compare Call Call Raise Call Call Call Assign Call Call Call FunctionDef name:get_sl arg:self arguments arg If Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "write_header", + "source_code": "def write_header(self, shape, mclass, is_complex=False, is_logical=False, nzmax=0):\n name = self._var_name\n is_global = self._var_is_global\n self._mat_tag_pos = self.file_stream.tell()\n self.write_bytes(self.mat_tag)\n af = np.zeros((), NDT_ARRAY_FLAGS)\n af['data_type'] = miUINT32\n af['byte_count'] = 8\n flags = is_complex << 3 | is_global << 2 | is_logical << 1\n af['flags_class'] = mclass | flags << 8\n af['nzmax'] = nzmax\n self.write_bytes(af)\n self.write_element(np.array(shape, dtype='i4'))\n name = np.asarray(name)\n if name == '':\n self.write_smalldata_element(name, miINT8, 0)\n else:\n self.write_element(name, miINT8)\n self._var_name = ''\n self._var_is_global = False", + "docstring": "Write header for given data options shape : sequence array shape mclass - mat5 matrix class is_complex - True if matrix is complex is_logical - True if matrix is logical nzmax - max non zero elements for sparse arrays We get the name and the global flag from the object, and reset them to defaults after we've used them", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", + "ast_data": "FunctionDef name:write_header arg:self arg:shape arg:mclass arg:is_complex arg:is_logical arg:nzmax arguments arg arg arg arg arg arg Assign Assign Assign Call Call Assign Call Assign Assign Assign Assign Assign Call Call Call Assign Call If Compare Call Call Assign Assign" + }, + { + "library": "kornia", + "name": "add_weighted", + "source_code": "def add_weighted(src1: Tensor, alpha: Union[float, Tensor], src2: Tensor, beta: Union[float, Tensor], gamma: Union[float, Tensor]) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(src1)\n KORNIA_CHECK_IS_TENSOR(src2)\n KORNIA_CHECK(src1.shape == src2.shape, f'src1 and src2 have different shapes. Got {src1.shape} and {src2.shape}')\n if isinstance(alpha, Tensor):\n KORNIA_CHECK(src1.shape == alpha.shape, 'alpha has a different shape than src.')\n else:\n alpha = tensor(alpha, dtype=src1.dtype, device=src1.device)\n if isinstance(beta, Tensor):\n KORNIA_CHECK(src1.shape == beta.shape, 'beta has a different shape than src.')\n else:\n beta = tensor(beta, dtype=src1.dtype, device=src1.device)\n if isinstance(gamma, Tensor):\n KORNIA_CHECK(src1.shape == gamma.shape, 'gamma has a different shape than src.')\n else:\n gamma = tensor(gamma, dtype=src1.dtype, device=src1.device)\n return src1 * alpha + src2 * beta + gamma", + "docstring": "Calculate the weighted sum of two Tensors. .. image:: _static/img/add_weighted.png The function calculates the weighted sum of two Tensors as follows: .. math:: out = src1 * alpha + src2 * beta + gamma Args: src1: Tensor with an arbitrary shape, equal to shape of src2. alpha: weight of the src1 elements as Union[float, Tensor]. src2: Tensor with an arbitrary shape, equal to shape of src1. beta: weight of the src2 elements as Union[float, Tensor]. gamma: scalar added to each sum as Union[float, Tensor]. Returns: Weighted Tensor with shape equal to src1 and src2 shapes. 
Example: >>> input1 = torch.rand(1, 1, 5, 5) >>> input2 = torch.rand(1, 1, 5, 5) >>> output = add_weighted(input1, 0.5, input2, 0.5, 1.0) >>> output.shape torch.Size([1, 1, 5, 5]) Notes: Tensor alpha/beta/gamma have to be with shape broadcastable to src1 and src2 shapes.", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\core.py", + "ast_data": "FunctionDef name:add_weighted arg:src1 arg:alpha arg:src2 arg:beta arg:gamma arguments arg arg arg arg arg Call Call Call Compare If Call Call Compare Assign Call If Call Call Compare Assign Call If Call Call Compare Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n router = MetadataRouter(owner=self.__class__.__name__).add_self_request(self).add(estimator=self._get_estimator(), method_mapping=MethodMapping().add(caller='fit', callee='fit')).add(splitter=self.cv, method_mapping=MethodMapping().add(caller='fit', callee='split'))\n return router", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\calibration.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "feature_implies", + "source_code": "def feature_implies(self, names, keep_origins=False):\n\n def get_implies(name, _caller=set()):\n implies = set()\n d = self.feature_supported[name]\n for i in d.get('implies', []):\n implies.add(i)\n if i in _caller:\n continue\n _caller.add(name)\n implies = implies.union(get_implies(i, _caller))\n return implies\n if isinstance(names, str):\n implies = get_implies(names)\n names = [names]\n else:\n assert hasattr(names, '__iter__')\n implies = set()\n for n in names:\n implies = implies.union(get_implies(n))\n if not keep_origins:\n implies.difference_update(names)\n return implies", + "docstring": "Return a set of CPU features that implied by 'names' Parameters ---------- names : str or sequence of str CPU feature name(s) in uppercase. keep_origins : bool if False(default) then the returned set will not contain any features from 'names'. This case happens only when two features imply each other. 
Examples -------- >>> self.feature_implies(\"SSE3\") {'SSE', 'SSE2'} >>> self.feature_implies(\"SSE2\") {'SSE'} >>> self.feature_implies(\"SSE2\", keep_origins=True) # 'SSE2' found here since 'SSE' and 'SSE2' imply each other {'SSE', 'SSE2'}", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", + "ast_data": "FunctionDef name:feature_implies arg:self arg:names arg:keep_origins arguments arg arg arg FunctionDef name:get_implies arg:name arg:_caller arguments arg arg Call Assign Call Assign For Call Call If Compare Call Assign Call Call Return return:yes If Call Assign Call Assign Call Assign Call For Assign Call Call If Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "variable_shape", + "source_code": "@property\ndef variable_shape(self):\n return tensor_shape.TensorShape([self.dimension])", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:variable_shape arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "load_ast", + "source_code": "def load_ast(nodes, indentation=' ', include_source_map=False, delete_on_exit=True):\n if not isinstance(nodes, (list, tuple)):\n nodes = (nodes,)\n source = parser.unparse(nodes, indentation=indentation)\n module, _ = load_source(source, delete_on_exit)\n if include_source_map:\n source_map = origin_info.create_source_map(nodes, source, module.__file__)\n else:\n source_map = None\n return (module, source, source_map)", + "docstring": "Loads the given AST as a Python module. Compiling the AST code this way ensures that the source code is readable by e.g. or . Args: nodes: Union[ast.AST, Iterable[ast.AST]], the code to compile, as an AST object. indentation: Text, the string to use for indentation. include_source_map: bool, whether return a source map. delete_on_exit: bool, whether to delete the temporary file used for compilation on exit. Returns: Tuple[module, Text, Dict[LineLocation, OriginInfo]], containing: the module containing the unparsed nodes, the source code corresponding to nodes, and the source map. 
Is include_source_map is False, the source map will be None.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\loader.py", + "ast_data": "FunctionDef name:load_ast arg:nodes arg:indentation arg:include_source_map arg:delete_on_exit arguments arg arg arg arg If Call Assign Assign Call Assign Call If Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "TimedAnimation", + "source_code": "class TimedAnimation(Animation):\n\n def __init__(self, fig, interval=200, repeat_delay=0, repeat=True, event_source=None, *args, **kwargs):\n self._interval = interval\n self._repeat_delay = repeat_delay if repeat_delay is not None else 0\n self._repeat = repeat\n if event_source is None:\n event_source = fig.canvas.new_timer(interval=self._interval)\n super().__init__(fig, *args, event_source=event_source, **kwargs)\n\n def _step(self, *args):\n still_going = super()._step(*args)\n if not still_going:\n if self._repeat:\n self._init_draw()\n self.frame_seq = self.new_frame_seq()\n self.event_source.interval = self._repeat_delay\n return True\n else:\n self.pause()\n if self._blit:\n self._fig.canvas.mpl_disconnect(self._resize_id)\n self._fig.canvas.mpl_disconnect(self._close_id)\n self.event_source = None\n return False\n self.event_source.interval = self._interval\n return True", + "docstring": "subclass for time-based animation. A new frame is drawn every *interval* milliseconds. .. note:: You must store the created Animation in a variable that lives as long as the animation should run. Otherwise, the Animation object will be garbage-collected and the animation stops. Parameters ---------- fig : The figure object used to get needed events, such as draw or resize. interval : int, default: 200 Delay between frames in milliseconds. repeat_delay : int, default: 0 The delay in milliseconds between consecutive animation runs, if *repeat* is True. repeat : bool, default: True Whether the animation repeats when the sequence of frames is completed. blit : bool, default: False Whether blitting is used to optimize drawing.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "ClassDef name:TimedAnimation FunctionDef name:__init__ arg:self arg:fig arg:interval arg:repeat_delay arg:repeat arg:event_source arguments arg arg arg arg arg arg arg arg Assign Assign Compare Assign If Compare Assign Call Call Call FunctionDef name:_step arg:self arguments arg arg Assign Call Call If If Call Assign Call Assign Return return:yes Call If Call Call Assign Return return:yes Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_frame", + "source_code": "def get_frame(self):\n return self.legendPatch", + "docstring": "Return the used to frame the legend.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\legend.py", + "ast_data": "FunctionDef name:get_frame arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "get_residual", + "source_code": "def get_residual(self):\n return self._data[10]", + "docstring": "Return weighted sum of squared residuals of the spline approximation. 
This is equivalent to:: sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py", + "ast_data": "FunctionDef name:get_residual arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_polyder", + "source_code": "def _polyder(p, m):\n if m == 0:\n result = p\n else:\n n = len(p)\n if n <= m:\n result = np.zeros_like(p[:1, ...])\n else:\n dp = p[:-m].copy()\n for k in range(m):\n rng = np.arange(n - k - 1, m - k - 1, -1)\n dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1))\n result = dp\n return result", + "docstring": "Differentiate polynomials represented with coefficients. p must be a 1-D or 2-D array. In the 2-D case, each column gives the coefficients of a polynomial; the first row holds the coefficients associated with the highest power. m must be a nonnegative integer. (numpy.polyder doesn't handle the 2-D case.)", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_savitzky_golay.py", + "ast_data": "FunctionDef name:_polyder arg:p arg:m arguments arg arg If Compare Assign Assign Call If Compare Assign Call Assign Call For Call Assign Call Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_try_cast", + "source_code": "def _try_cast(arr: list | np.ndarray, dtype: np.dtype, copy: bool) -> ArrayLike:\n is_ndarray = isinstance(arr, np.ndarray)\n if dtype == object:\n if not is_ndarray:\n subarr = construct_1d_object_array_from_listlike(arr)\n return subarr\n return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy)\n elif dtype.kind == 'U':\n if is_ndarray:\n arr = cast(np.ndarray, arr)\n shape = arr.shape\n if arr.ndim > 1:\n arr = arr.ravel()\n else:\n shape = (len(arr),)\n return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape(shape)\n elif dtype.kind in 'mM':\n if is_ndarray:\n arr = cast(np.ndarray, arr)\n if arr.ndim == 2 and arr.shape[1] == 1:\n return maybe_cast_to_datetime(arr[:, 0], dtype).reshape(arr.shape)\n return maybe_cast_to_datetime(arr, dtype)\n elif dtype.kind in 'iu':\n subarr = maybe_cast_to_integer_array(arr, dtype)\n elif not copy:\n subarr = np.asarray(arr, dtype=dtype)\n else:\n subarr = np.array(arr, dtype=dtype, copy=copy)\n return subarr", + "docstring": "Convert input to numpy ndarray and optionally cast to a given dtype. Parameters ---------- arr : ndarray or list Excludes: ExtensionArray, Series, Index. dtype : np.dtype copy : bool If False, don't copy the data if not needed. 
Returns ------- np.ndarray or ExtensionArray", + "type": "function", + "file_path": "pandas\\pandas\\core\\construction.py", + "ast_data": "FunctionDef name:_try_cast arg:arr arg:dtype arg:copy arguments arg arg arg Assign Call If Compare If Assign Call Return return:yes Return return:yes Call Call If Compare If Assign Call Assign If Compare Assign Call Assign Call Return return:yes Call Call If Compare If Assign Call If BoolOp Compare Compare Return return:yes Call Call Return return:yes Call If Compare Assign Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "info", + "source_code": "def info(request, message, extra_tags='', fail_silently=False):\n add_message(request, constants.INFO, message, extra_tags=extra_tags, fail_silently=fail_silently)", + "docstring": "Add a message with the `` level.", + "type": "function", + "file_path": "django\\django\\contrib\\messages\\api.py", + "ast_data": "FunctionDef name:info arg:request arg:message arg:extra_tags arg:fail_silently arguments arg arg arg arg Call" + }, + { + "library": "kornia", + "name": "rgba_to_bgr", + "source_code": "def rgba_to_bgr(image: Tensor) -> Tensor:\n if not isinstance(image, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n if len(image.shape) < 3 or image.shape[-3] != 4:\n raise ValueError(f'Input size must have a shape of (*, 4, H, W).Got {image.shape}')\n x_rgb: Tensor = rgba_to_rgb(image)\n return rgb_to_bgr(x_rgb)", + "docstring": "Convert an image from RGBA to BGR. Args: image: RGBA Image to be converted to BGR of shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> input = torch.rand(2, 4, 4, 5) >>> output = rgba_to_bgr(input) # 2x3x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\rgb.py", + "ast_data": "FunctionDef name:rgba_to_bgr arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ConvReLU2d", + "source_code": "class ConvReLU2d(nnqat.Conv2d, nni._FusedModule):\n _FLOAT_MODULE: ClassVar[type[nn.Module]] = nni.ConvReLU2d\n _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv2d]] = nn.Conv2d\n _FLOAT_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = None\n _FLOAT_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = nn.ReLU\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', qconfig=None):\n super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode, qconfig=qconfig)\n assert qconfig, 'qconfig must be provided for QAT module'\n self.qconfig = qconfig\n self.weight_fake_quant = self.qconfig.weight()\n\n def forward(self, input):\n return F.relu(self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias))\n\n @classmethod\n def from_float(cls, mod, use_precomputed_fake_quant=False):\n return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)", + "docstring": "A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with FakeQuantize modules for weight for quantization aware training. We combined the interface of :class: and :class:. 
Attributes: weight_fake_quant: fake quant module for weight", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py", + "ast_data": "ClassDef name:ConvReLU2d FunctionDef name:__init__ arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:qconfig arguments arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call Call Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_char_ngrams", + "source_code": "def _char_ngrams(self, text_document):\n text_document = self._white_spaces.sub(' ', text_document)\n text_len = len(text_document)\n min_n, max_n = self.ngram_range\n if min_n == 1:\n ngrams = list(text_document)\n min_n += 1\n else:\n ngrams = []\n ngrams_append = ngrams.append\n for n in range(min_n, min(max_n + 1, text_len + 1)):\n for i in range(text_len - n + 1):\n ngrams_append(text_document[i:i + n])\n return ngrams", + "docstring": "Tokenize text_document into a sequence of character n-grams", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:_char_ngrams arg:self arg:text_document arguments arg arg Assign Call Assign Call Assign If Compare Assign Call Assign Assign For Call Call For Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_add_iterable_element", + "source_code": "def _add_iterable_element(self, f, v, feature_names, vocab, *, fitting=True, transforming=False, indices=None, values=None):\n for vv in v:\n if isinstance(vv, str):\n feature_name = '%s%s%s' % (f, self.separator, vv)\n vv = 1\n else:\n raise TypeError(f'Unsupported type {type(vv)} in iterable value. Only iterables of string are supported.')\n if fitting and feature_name not in vocab:\n vocab[feature_name] = len(feature_names)\n feature_names.append(feature_name)\n if transforming and feature_name in vocab:\n indices.append(vocab[feature_name])\n values.append(self.dtype(vv))", + "docstring": "Add feature names for iterable of strings", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\_dict_vectorizer.py", + "ast_data": "FunctionDef name:_add_iterable_element arg:self arg:f arg:v arg:feature_names arg:vocab arguments arg arg arg arg arg arg arg arg arg For If Call Assign Assign Raise Call Call If BoolOp Compare Assign Call Call If BoolOp Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "load_constants", + "source_code": "def load_constants(self, constants_map: dict[str, torch.Tensor], *, check_full_update: bool, user_managed: bool=False) -> None:\n self.loader.load_constants(constants_map, False, check_full_update, user_managed)", + "docstring": "Given a mapping of constant fqns to tensors, load the constants into the model. You can use `` to get the list of constant fqns that are needed in the compiled model. Args: constants_map: A mapping of constant fqns to tensors. 
check_full_update: Whether to add check to see if all the constants are updated and have values.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\package\\package.py", + "ast_data": "FunctionDef name:load_constants arg:self arg:constants_map arguments arg arg arg arg Call" + }, + { + "library": "numpy", + "name": "lagfromroots", + "source_code": "def lagfromroots(roots):\n return pu._fromroots(lagline, lagmul, roots)", + "docstring": "Generate a Laguerre series with given roots. The function returns the coefficients of the polynomial .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), in Laguerre form, where the :math: are the roots specified in . If a zero has multiplicity n, then it must appear in n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then looks something like [2, 2, 2, 3, 3]. The roots can appear in any order. If the returned coefficients are , then .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) The coefficient of the last term is not generally 1 for monic polynomials in Laguerre form. Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-D array of coefficients. If all roots are real then is a real array, if some of the roots are complex, then is complex even if all the coefficients in the result are real (see Examples below). See Also -------- numpy.polynomial.polynomial.polyfromroots numpy.polynomial.legendre.legfromroots numpy.polynomial.chebyshev.chebfromroots numpy.polynomial.hermite.hermfromroots numpy.polynomial.hermite_e.hermefromroots Examples -------- >>> from numpy.polynomial.laguerre import lagfromroots, lagval >>> coef = lagfromroots((-1, 0, 1)) >>> lagval((-1, 0, 1), coef) array([0., 0., 0.]) >>> coef = lagfromroots((-1j, 1j)) >>> lagval((-1j, 1j), coef) array([0.+0.j, 0.+0.j])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\laguerre.py", + "ast_data": "FunctionDef name:lagfromroots arg:roots arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_weight_collections", + "source_code": "def set_weight_collections(self, weight_collections):\n self._weight_collections = weight_collections", + "docstring": "Sets the weight collections for the layer. 
Args: weight_collections: A list of collection names to which the Variable will be added.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:set_weight_collections arg:self arg:weight_collections arguments arg arg Assign" + }, + { + "library": "scipy", + "name": "_rand1", + "source_code": "def _rand1(self, samples):\n r0, r1, r2 = samples[..., :3].T\n return self.population[r0] + self.scale * (self.population[r1] - self.population[r2])", + "docstring": "rand1bin, rand1exp", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py", + "ast_data": "FunctionDef name:_rand1 arg:self arg:samples arguments arg arg Assign Return return:yes" + }, + { + "library": "scipy", + "name": "dt", + "source_code": "@property\ndef dt(self):\n return self._dt", + "docstring": "Return the sampling time of the system.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:dt arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "copy", + "source_code": "def copy(self):\n return copy.copy(self)", + "docstring": "Return a shallow copy of this object.", + "type": "method", + "file_path": "django\\django\\utils\\datastructures.py", + "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_tile_description_to_json", + "source_code": "@classmethod\ndef _tile_description_to_json(cls, tile_desc):\n if tile_desc is None:\n return None\n math_instruction_dict = None\n if hasattr(tile_desc, 'math_instruction') and tile_desc.math_instruction is not None:\n math_instruction = tile_desc.math_instruction\n math_instruction_dict = {'instruction_shape': math_instruction.instruction_shape, 'element_a': cls._enum_to_json(math_instruction.element_a), 'element_b': cls._enum_to_json(math_instruction.element_b), 'element_accumulator': cls._enum_to_json(math_instruction.element_accumulator), 'opcode_class': cls._enum_to_json(math_instruction.opcode_class), 'math_operation': cls._enum_to_json(math_instruction.math_operation)}\n if hasattr(math_instruction, 'element_scale_factor') and math_instruction.element_scale_factor is not None:\n math_instruction_dict['element_scale_factor'] = cls._enum_to_json(math_instruction.element_scale_factor)\n result = {'threadblock_shape': tile_desc.threadblock_shape, 'stages': tile_desc.stages, 'warp_count': tile_desc.warp_count, 'math_instruction': math_instruction_dict, 'min_compute': tile_desc.minimum_compute_capability, 'max_compute': tile_desc.maximum_compute_capability, 'cluster_shape': tile_desc.cluster_shape, 'explicit_vector_sizes': tile_desc.explicit_vector_sizes}\n if hasattr(tile_desc, 'tile_shape') and tile_desc.tile_shape != tile_desc.threadblock_shape:\n result['tile_shape'] = tile_desc.tile_shape\n return result", + "docstring": "Convert TileDescription to JSON dict. 
Args: tile_desc: TileDescription object Returns: dict: Dictionary representation", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py", + "ast_data": "FunctionDef name:_tile_description_to_json arg:cls arg:tile_desc arguments arg arg If Compare Return return:no Assign If BoolOp Call Compare Assign Assign Call Call Call Call Call If BoolOp Call Compare Assign Call Assign If BoolOp Call Compare Assign Return return:yes" + }, + { + "library": "scrapy", + "name": "method_is_overridden", + "source_code": "def method_is_overridden(subclass: type, base_class: type, method_name: str) -> bool:\n base_method = getattr(base_class, method_name)\n sub_method = getattr(subclass, method_name)\n return base_method.__code__ is not sub_method.__code__", + "docstring": "Return True if a method named ``. >>> class Base: ... def foo(self): ... pass >>> class Sub1(Base): ... pass >>> class Sub2(Base): ... def foo(self): ... pass >>> class Sub3(Sub1): ... def foo(self): ... pass >>> class Sub4(Sub2): ... pass >>> method_is_overridden(Sub1, Base, 'foo') False >>> method_is_overridden(Sub2, Base, 'foo') True >>> method_is_overridden(Sub3, Base, 'foo') True >>> method_is_overridden(Sub4, Base, 'foo') True", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\deprecate.py", + "ast_data": "FunctionDef name:method_is_overridden arg:subclass arg:base_class arg:method_name arguments arg arg arg Assign Call Assign Call Return return:yes Compare" + }, + { + "library": "authlib", + "name": "authenticate_refresh_token", + "source_code": "def authenticate_refresh_token(self, refresh_token):\n raise NotImplementedError()", + "docstring": "Get token information with refresh_token string. Developers MUST implement this method in subclass:: def authenticate_refresh_token(self, refresh_token): token = Token.get(refresh_token=refresh_token) if token and not token.refresh_token_revoked: return token :param refresh_token: The refresh token issued to the client :return: token", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\refresh_token.py", + "ast_data": "FunctionDef name:authenticate_refresh_token arg:self arg:refresh_token arguments arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "mean_squared_log_error", + "source_code": "@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'multioutput': [StrOptions({'raw_values', 'uniform_average'}), 'array-like']}, prefer_skip_nested_validation=True)\ndef mean_squared_log_error(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average'):\n xp, _ = get_namespace(y_true, y_pred)\n _, y_true, y_pred, _, _ = _check_reg_targets_with_floating_dtype(y_true, y_pred, sample_weight, multioutput, xp=xp)\n if xp.any(y_true <= -1) or xp.any(y_pred <= -1):\n raise ValueError('Mean Squared Logarithmic Error cannot be used when targets contain values less than or equal to -1.')\n return mean_squared_error(xp.log1p(y_true), xp.log1p(y_pred), sample_weight=sample_weight, multioutput=multioutput)", + "docstring": "Mean squared logarithmic error regression loss. Read more in the :ref:. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. 
multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors when the input is of multioutput format. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats A non-negative floating point value (the best value is 0.0), or an array of floating point values, one for each individual target. Examples -------- >>> from sklearn.metrics import mean_squared_log_error >>> y_true = [3, 5, 2.5, 7] >>> y_pred = [2.5, 5, 4, 8] >>> mean_squared_log_error(y_true, y_pred) 0.039... >>> y_true = [[0.5, 1], [1, 2], [7, 6]] >>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]] >>> mean_squared_log_error(y_true, y_pred) 0.044... >>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values') array([0.00462428, 0.08377444]) >>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7]) 0.060...", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\_regression.py", + "ast_data": "FunctionDef name:mean_squared_log_error arg:y_true arg:y_pred arguments arg arg arg arg Assign Call Assign Call If BoolOp Call Compare Call Compare Raise Call Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "map_fn", + "source_code": "@doc_controls.do_not_generate_docs\ndef map_fn(fn, elems, name=None, dtype=None):\n return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)", + "docstring": "Map the function fn over the elements elems and return the outputs. Args: fn: Callable that will be called upon each element in elems elems: tensor name: A string name for the map node in the graph dtype: Output data type. Returns: Tensor with dtype .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:map_fn arg:fn arg:elems arg:name arg:dtype arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "_parse_local_version", + "source_code": "def _parse_local_version(local):\n if local is not None:\n return tuple((part.lower() if not part.isdigit() else int(part) for part in _local_version_seperators.split(local)))", + "docstring": "Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").", + "type": "function", + "file_path": "numpy\\numpy\\_utils\\_pep440.py", + "ast_data": "FunctionDef name:_parse_local_version arg:local arguments arg If Compare Return return:yes Call Call Call Call Call" + }, + { + "library": "sphinx", + "name": "IntersphinxDispatcher", + "source_code": "class IntersphinxDispatcher(CustomReSTDispatcher):\n\n def role(self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter) -> tuple[RoleFunction, list[system_message]]:\n if len(role_name) > 9 and role_name.startswith(('external:', 'external+')):\n return (IntersphinxRole(role_name), [])\n else:\n return super().role(role_name, language_module, lineno, reporter)", + "docstring": "Custom dispatcher for external role. 
This enables :external:***:/:external+***: roles on parsing reST document.", + "type": "class", + "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py", + "ast_data": "ClassDef name:IntersphinxDispatcher FunctionDef name:role arg:self arg:role_name arg:language_module arg:lineno arg:reporter arguments arg arg arg arg arg If BoolOp Compare Call Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_gradient_components", + "source_code": "def get_gradient_components(self, value):\n return value", + "docstring": "Returns the components of that should be included in gradients. For a ResourceVariable, its gradient component is its handle tensor. For now, we return the ResourceVariable because the gradient infrastructure has special logic to handle ResourceVariables. We should remove the special logic and return the handle tensor. Args: value: A . Returns: itself.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:get_gradient_components arg:self arg:value arguments arg arg Return return:yes" + }, + { + "library": "django", + "name": "debug", + "source_code": "def debug(request):\n context_extras = {}\n if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:\n context_extras['debug'] = True\n from django.db import connections\n context_extras['sql_queries'] = lazy(lambda: list(itertools.chain.from_iterable((connections[x].queries for x in connections))), list)\n return context_extras", + "docstring": "Return context variables helpful for debugging.", + "type": "function", + "file_path": "django\\django\\template\\context_processors.py", + "ast_data": "FunctionDef name:debug arg:request arguments arg Assign If BoolOp Compare Call Assign Assign Call arguments Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "key", + "source_code": "def key(profile):\n components = profile['name'].lower().split(' ')\n return ' '.join([components[-1]] + components[:-1])", + "docstring": "Get a sorting key based on the lower case last name, then firstname", + "type": "function", + "file_path": "scikit-learn\\build_tools\\generate_authors_table.py", + "ast_data": "FunctionDef name:key arg:profile arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "fillna", + "source_code": "def fillna(self, value, limit: int | None=None, inplace: bool=False) -> list[Block]:\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if not self._can_hold_na:\n noop = True\n else:\n mask = isna(self.values)\n mask, noop = validate_putmask(self.values, mask)\n if noop:\n return [self.copy(deep=False)]\n if limit is not None:\n mask[mask.cumsum(self.values.ndim - 1) > limit] = False\n if inplace:\n nbs = self.putmask(mask.T, value)\n else:\n nbs = self.where(value, ~mask.T)\n return extend_blocks(nbs)", + "docstring": "fillna on the block with the value. 
If we fail, then convert to block to hold objects instead and try again", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:fillna arg:self arg:value arg:limit arg:inplace arguments arg arg arg arg Assign Call If Assign Assign Call Assign Call If Return return:yes Call If Compare Assign Compare Call If Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "mail_admins", + "source_code": "def mail_admins(subject, message, fail_silently=False, connection=None, html_message=None):\n _send_server_message(setting_name='ADMINS', subject=subject, message=message, html_message=html_message, fail_silently=fail_silently, connection=connection)", + "docstring": "Send a message to the admins, as defined by the ADMINS setting.", + "type": "function", + "file_path": "django\\django\\core\\mail\\__init__.py", + "ast_data": "FunctionDef name:mail_admins arg:subject arg:message arg:fail_silently arg:connection arg:html_message arguments arg arg arg arg arg Call" + }, + { + "library": "pytorch", + "name": "extract_logger_info", + "source_code": "def extract_logger_info(model_a: nn.Module, model_b: nn.Module, logger_cls: Callable, model_name_to_use_for_layer_names: str) -> NSResultsType:\n torch._C._log_api_usage_once('quantization_api._numeric_suite_fx.extract_logger_info')\n results: NSResultsType = {}\n for model in (model_a, model_b):\n _extract_logger_info_one_model(model, results, logger_cls)\n maybe_add_missing_fqns(results)\n results = rekey_logger_info_on_node_name_of_model(results, model_name_to_use_for_layer_names)\n return results", + "docstring": "Traverse all loggers in and , and extract the logged information. Args: model_a: model A model_b: model B logger_cls: class of Logger to use model_name_to_use_for_layer_names: string name of model to use for layer names in the output Return: NSResultsType, containing the logged comparisons", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py", + "ast_data": "FunctionDef name:extract_logger_info arg:model_a arg:model_b arg:logger_cls arg:model_name_to_use_for_layer_names arguments arg arg arg arg Call For Call Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "to_coo", + "source_code": "def to_coo(self) -> spmatrix:\n import_optional_dependency('scipy')\n from scipy.sparse import coo_matrix\n dtype = find_common_type(self._parent.dtypes.to_list())\n if isinstance(dtype, SparseDtype):\n dtype = dtype.subtype\n cols, rows, data = ([], [], [])\n for col, (_, ser) in enumerate(self._parent.items()):\n sp_arr = ser.array\n row = sp_arr.sp_index.indices\n cols.append(np.repeat(col, len(row)))\n rows.append(row)\n data.append(sp_arr.sp_values.astype(dtype, copy=False))\n cols = np.concatenate(cols)\n rows = np.concatenate(rows)\n data = np.concatenate(data)\n return coo_matrix((data, (rows, cols)), shape=self._parent.shape)", + "docstring": "Return the contents of the frame as a sparse SciPy COO matrix. Returns ------- scipy.sparse.spmatrix If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.sparse.to_dense : Convert a DataFrame with sparse values to dense. Notes ----- The dtype will be the lowest-common-denominator type (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. e.g. 
If the dtypes are float16 and float32, dtype will be upcast to float32. By numpy.find_common_type convention, mixing int64 and and uint64 will result in a float64 dtype. Examples -------- >>> df = pd.DataFrame({\"A\": pd.arrays.SparseArray([0, 1, 0, 1])}) >>> df.sparse.to_coo()", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\sparse\\accessor.py", + "ast_data": "FunctionDef name:to_coo arg:self arguments arg Call Assign Call Call If Call Assign Assign For Call Call Assign Assign Call Call Call Call Call Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "set_upward", + "source_code": "def set_upward(self, key, value):\n context = self.dicts[-1]\n for d in reversed(self.dicts):\n if key in d:\n context = d\n break\n context[key] = value", + "docstring": "Set a variable in one of the higher contexts if it exists there, otherwise in the current context.", + "type": "method", + "file_path": "django\\django\\template\\context.py", + "ast_data": "FunctionDef name:set_upward arg:self arg:key arg:value arguments arg arg arg Assign For Call If Compare Assign Assign" + }, + { + "library": "pytorch", + "name": "_shutdown", + "source_code": "@abc.abstractmethod\ndef _shutdown(self, death_sig: signal.Signals=signal.SIGTERM) -> None:\n raise NotImplementedError", + "docstring": "Clean up any resources that were allocated during the agent's work. Args: death_sig: Signal to send to the child process, SIGTERM is default", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py", + "ast_data": "FunctionDef name:_shutdown arg:self arg:death_sig arguments arg arg Raise" + }, + { + "library": "scrapy", + "name": "is_generator_with_return_value", + "source_code": "def is_generator_with_return_value(callable: Callable[..., Any]) -> bool:\n if callable in _generator_callbacks_cache:\n return bool(_generator_callbacks_cache[callable])\n\n def returns_none(return_node: ast.Return) -> bool:\n value = return_node.value\n return value is None or (isinstance(value, ast.Constant) and value.value is None)\n if inspect.isgeneratorfunction(callable):\n func = callable\n while isinstance(func, partial):\n func = func.func\n src = inspect.getsource(func)\n pattern = re.compile('(^[\\\\t ]+)')\n code = pattern.sub('', src)\n match = pattern.match(src)\n if match:\n code = re.sub(f'\\n{match.group(0)}', '\\n', code)\n tree = ast.parse(code)\n for node in walk_callable(tree):\n if isinstance(node, ast.Return) and (not returns_none(node)):\n _generator_callbacks_cache[callable] = True\n return bool(_generator_callbacks_cache[callable])\n _generator_callbacks_cache[callable] = False\n return bool(_generator_callbacks_cache[callable])", + "docstring": "Returns True if a callable is a generator function which includes a 'return' statement with a value different than None, False otherwise", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\misc.py", + "ast_data": "FunctionDef name:is_generator_with_return_value arg:callable arguments arg If Compare Return return:yes Call FunctionDef name:returns_none arg:return_node arguments arg Assign Return return:yes BoolOp Compare BoolOp Call Compare If Call Assign While Call Assign Assign Call Assign Call Assign Call Assign Call If Assign Call Call Assign Call For Call If BoolOp Call Call Assign Return return:yes Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_new_global_index", + "source_code": "def _get_new_global_index(self, 
index_override):\n if index_override is None:\n global_index = self._next_global_index\n else:\n if index_override in self._used_global_indices:\n raise ValueError('Index %d was already used by another call to add')\n global_index = index_override\n self._used_global_indices.add(global_index)\n while self._next_global_index in self._used_global_indices:\n self._next_global_index += 1\n return global_index", + "docstring": "Return the next unused argument index in order or use an override. Args: index_override: An index to use instead of the next available or None to use the next available. Returns: A valid global_index to use for the next hint argument. Raises: ValueError: If the index_override is already used by another hint.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py", + "ast_data": "FunctionDef name:_get_new_global_index arg:self arg:index_override arguments arg arg If Compare Assign If Compare Raise Call Assign Call While Compare Return return:yes" + }, + { + "library": "numpy", + "name": "__mul__", + "source_code": "def __mul__(self, i):\n return asarray(multiply(self, i))", + "docstring": "Return (self * i), that is string multiple concatenation, element-wise. See Also -------- multiply", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:__mul__ arg:self arg:i arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "make_call_generated_code", + "source_code": "def make_call_generated_code(self, fn_name: str) -> None:\n self.extend_output(self.load_function_name(fn_name, True))\n graphargs = self.tx.output.graphargs\n seen_sources: OrderedSet[Source] = OrderedSet()\n\n def collect_temp_source(source):\n if source in seen_sources:\n self.mark_source_temp(source)\n return\n seen_sources.add(source)\n if isinstance(source, ChainedSource):\n collect_temp_source(source.base)\n if isinstance(source, DictGetItemSource) and isinstance(source.index, Source):\n collect_temp_source(source.index)\n for arg in graphargs:\n if arg.source is not None:\n collect_temp_source(arg.source)\n for arg in graphargs:\n if arg.pass_arg_as_tensor:\n self.add_push_null(lambda: self.extend_output([self.create_load_python_module(torch), self.create_load_attr('_as_tensor_fullprec')]))\n self.call_reconstruct(arg)\n self.extend_output(create_call_function(1, False))\n else:\n self.call_reconstruct(arg)\n self.extend_output(create_call_function(len(graphargs), False))", + "docstring": "Call the generated code function stored in fn_name", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\codegen.py", + "ast_data": "FunctionDef name:make_call_generated_code arg:self arg:fn_name arguments arg arg Call Call Assign Call FunctionDef name:collect_temp_source arg:source arguments arg If Compare Call Return return:no Call If Call Call If BoolOp Call Call Call For If Compare Call For If Call arguments Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "checkpoint_exists_internal", + "source_code": "def checkpoint_exists_internal(checkpoint_prefix):\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix, saver_pb2.SaverDef.V2)\n if file_io.get_matching_files(pathname):\n return True\n elif file_io.get_matching_files(checkpoint_prefix):\n return True\n else:\n return False", + "docstring": "Checks whether a V1 or V2 checkpoint exists with the specified prefix. 
This is an internal function to check if a checkpoint exists, since it takes into account the naming difference between V1 and V2 formats. Args: checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of or that of , regardless of sharded/non-sharded or V1/V2. Returns: A bool, true if a checkpoint referred to by exists.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py", + "ast_data": "FunctionDef name:checkpoint_exists_internal arg:checkpoint_prefix arguments arg Assign Call If Call Return return:yes If Call Return return:yes Return return:yes" + }, + { + "library": "kornia", + "name": "IrregularRenderer", + "source_code": "class IrregularRenderer(VolumeRenderer):\n\n def forward(self, rgbs: Tensor, densities: Tensor, points_3d: Tensor) -> Tensor:\n t_vals = calc_ray_t_vals(points_3d)\n deltas = t_vals[..., 1:] - t_vals[..., :-1]\n far = torch.empty(size=t_vals.shape[:-1], dtype=t_vals.dtype, device=t_vals.device).fill_(self._huge)\n deltas = torch.cat([deltas, far[..., None]], dim=-1)\n alpha = 1 - torch.exp(-1.0 * densities * deltas[..., None])\n return self._render(alpha, rgbs)", + "docstring": "Renders 3D irregularly sampled points along rays.", + "type": "class", + "file_path": "kornia\\kornia\\nerf\\volume_renderer.py", + "ast_data": "ClassDef name:IrregularRenderer FunctionDef name:forward arg:self arg:rgbs arg:densities arg:points_3d arguments arg arg arg arg Assign Call Assign Assign Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_decode_helper", + "source_code": "def _decode_helper(obj):\n if isinstance(obj, dict) and 'class_name' in obj:\n if obj['class_name'] == 'TensorShape':\n return tensor_shape.TensorShape(obj['items'])\n elif obj['class_name'] == 'TypeSpec':\n return type_spec_registry.lookup(obj['type_spec'])._deserialize(_decode_helper(obj['serialized']))\n elif obj['class_name'] == '__tuple__':\n return tuple((_decode_helper(i) for i in obj['items']))\n elif obj['class_name'] == '__ellipsis__':\n return Ellipsis\n return obj", + "docstring": "A decoding helper that is TF-object aware.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\json_utils.py", + "ast_data": "FunctionDef name:_decode_helper arg:obj arguments arg If BoolOp Call Compare If Compare Return return:yes Call If Compare Return return:yes Call Call Call If Compare Return return:yes Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "memory_allocated", + "source_code": "def memory_allocated(device: _device_t=None) -> int:\n return memory_stats(device=device).get('allocated_bytes.all.current', 0)", + "docstring": "Return the current GPU memory occupied by tensors in bytes for a given device. Args: device (torch.device or int or str, optional): selected device. 
Returns statistic for the current device, given by :func:, if :attr: is `xpu-smi` since some unused memory can be held by the caching allocator and some context needs to be created on GPU.", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\memory.py", + "ast_data": "FunctionDef name:memory_allocated arg:device arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "Poisson", + "source_code": "class Poisson(MeanMetricWrapper):\n\n def __init__(self, name='poisson', dtype=None):\n super(Poisson, self).__init__(poisson, name, dtype=dtype)", + "docstring": "Computes the Poisson metric between and . Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Poisson() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.49999997 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.99999994 Usage with API:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "ClassDef name:Poisson FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "value", + "source_code": "@property\ndef value(self):\n return self._value", + "docstring": "The value of this dimension, or None if it is unknown.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_ps_failure", + "source_code": "def _is_ps_failure(error):\n if isinstance(error, PSUnavailableError):\n return True\n if isinstance(error, (ClosureInputError, ClosureAbortedError)):\n error = error.original_exception\n if _RPC_ERROR_FROM_PS not in str(error):\n return False\n if isinstance(error, (errors.UnavailableError, errors.AbortedError)):\n return True\n if isinstance(error, errors.InvalidArgumentError):\n if 'unknown device' in str(error).lower() or 'Unable to find the relevant tensor remote_handle' in str(error):\n return True\n return False", + "docstring": "Whether the error is considered a parameter server failure.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:_is_ps_failure arg:error arguments arg If Call Return return:yes If Call Assign If Compare Call Return return:yes If Call Return return:yes If Call If BoolOp Compare Call Call Compare Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "split", + "source_code": "def split(self, labels, centers, scores):\n self.left = _BisectingTree(indices=self.indices[labels == 0], center=centers[0], score=scores[0])\n self.right = _BisectingTree(indices=self.indices[labels == 1], center=centers[1], score=scores[1])\n self.indices = None", + "docstring": "Split the cluster node into two subclusters.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py", + "ast_data": "FunctionDef name:split arg:self arg:labels arg:centers arg:scores arguments arg arg arg arg Assign Call Compare Assign Call Compare Assign" + }, + { + "library": "tensorflow", + "name": "list_objects", + "source_code": "def list_objects(graph_view, skip_slot_variables=False):\n trackable_objects = 
objects_ids_and_slot_variables_and_paths(graph_view, skip_slot_variables)[0]\n return trackable_objects", + "docstring": "Traverse the object graph and list all accessible objects.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\util.py", + "ast_data": "FunctionDef name:list_objects arg:graph_view arg:skip_slot_variables arguments arg arg Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "parse_readable_size_str", + "source_code": "def parse_readable_size_str(size_str):\n size_str = size_str.strip()\n if size_str.endswith('B'):\n size_str = size_str[:-1]\n if size_str.isdigit():\n return int(size_str)\n elif size_str.endswith('k'):\n return int(float(size_str[:-1]) * 1024)\n elif size_str.endswith('M'):\n return int(float(size_str[:-1]) * 1048576)\n elif size_str.endswith('G'):\n return int(float(size_str[:-1]) * 1073741824)\n else:\n raise ValueError('Failed to parsed human-readable byte size str: \"%s\"' % size_str)", + "docstring": "Convert a human-readable str representation to number of bytes. Only the units \"kB\", \"MB\", \"GB\" are supported. The \"B character at the end of the input may be omitted. Args: size_str: () A human-readable str representing a number of bytes (e.g., \"0\", \"1023\", \"1.1kB\", \"24 MB\", \"23GB\", \"100 G\". Returns: () The parsed number of bytes. Raises: ValueError: on failure to parse the input .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py", + "ast_data": "FunctionDef name:parse_readable_size_str arg:size_str arguments arg Assign Call If Call Assign If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Raise Call" + }, + { + "library": "pytorch", + "name": "fuse_linear_bn_eval", + "source_code": "def fuse_linear_bn_eval(linear: LinearT, bn: torch.nn.modules.batchnorm._BatchNorm) -> LinearT:\n assert not (linear.training or bn.training), 'Fusion only for eval!'\n fused_linear = copy.deepcopy(linear)\n '\\n Linear-BN needs to be fused while preserving the shapes of linear weight/bias.\\n To preserve the shapes of linear weight/bias, the channel dim of bn needs to be broadcastable with the last dim of linear,\\n because bn operates over the channel dim, (N, C_in, H, W) while linear operates over the last dim, (*, H_in).\\n To be broadcastable, the number of features in bn and\\n the number of output features from linear must satisfy the following condition:\\n 1. they are equal, or\\n 2. the number of features in bn is 1\\n Otherwise, skip the folding path\\n '\n assert linear.out_features == bn.num_features or bn.num_features == 1, 'To fuse, linear.out_features == bn.num_features or bn.num_features == 1'\n assert bn.running_mean is not None and bn.running_var is not None\n fused_linear.weight, fused_linear.bias = fuse_linear_bn_weights(fused_linear.weight, fused_linear.bias, bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias)\n return fused_linear", + "docstring": "Fuse a linear module and a BatchNorm module into a single, new linear module. Args: linear (torch.nn.Linear): A Linear module. bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module. Returns: torch.nn.Linear: The fused linear module. .. 
note:: Both `` must have its running buffers computed.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\fusion.py", + "ast_data": "FunctionDef name:fuse_linear_bn_eval arg:linear arg:bn arguments arg arg BoolOp Assign Call BoolOp Compare Compare BoolOp Compare Compare Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "decorate", + "source_code": "def decorate(self, content: StringList) -> None:\n prepend_prolog(content, self.config.rst_prolog)\n append_epilog(content, self.config.rst_epilog)", + "docstring": "Preprocess reST content before parsing.", + "type": "method", + "file_path": "sphinx\\sphinx\\parsers.py", + "ast_data": "FunctionDef name:decorate arg:self arg:content arguments arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "get_collection", + "source_code": "@tf_export(v1=['get_collection'])\ndef get_collection(key, scope=None) -> list[Any]:\n return get_default_graph().get_collection(key, scope)", + "docstring": "Wrapper for using the default graph. See for more details. Args: key: The key for the collection. For example, the class contains many standard names for collections. scope: (Optional.) If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied and the choice or means that a without special tokens filters by prefix. Returns: The list of values in the collection with the given , or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:get_collection arg:key arg:scope arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_estimator_has", + "source_code": "def _estimator_has(attr):\n\n def check(self):\n getattr(self.estimator, attr)\n return True\n return check", + "docstring": "Check that final_estimator has . 
Used together with .", + "type": "function", + "file_path": "scikit-learn\\sklearn\\frozen\\_frozen.py", + "ast_data": "FunctionDef name:_estimator_has arg:attr arguments arg FunctionDef name:check arg:self arguments arg Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "view_to_reshape", + "source_code": "def view_to_reshape(gm):\n subgraph_names: OrderedSet[str] = OrderedSet((x.target for x in gm.graph.find_nodes(op='get_attr')))\n for child_name, child_mod in gm.named_children():\n if child_name in subgraph_names and isinstance(child_mod, torch.fx.GraphModule):\n view_to_reshape(child_mod)\n for nd in gm.graph.find_nodes(op='call_function', target=torch.ops.aten.view.default):\n nd.target = torch.ops.aten.reshape.default", + "docstring": "Replace view ops in the GraphModule to reshape ops.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py", + "ast_data": "FunctionDef name:view_to_reshape arg:gm arguments arg Call Call For Call If BoolOp Compare Call Call For Call Assign" + }, + { + "library": "django", + "name": "password_change_done", + "source_code": "def password_change_done(self, request, extra_context=None):\n from django.contrib.auth.views import PasswordChangeDoneView\n defaults = {'extra_context': {**self.each_context(request), **(extra_context or {})}}\n if self.password_change_done_template is not None:\n defaults['template_name'] = self.password_change_done_template\n request.current_app = self.name\n return PasswordChangeDoneView.as_view(**defaults)(request)", + "docstring": "Display the \"success\" page after a password change.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\sites.py", + "ast_data": "FunctionDef name:password_change_done arg:self arg:request arg:extra_context arguments arg arg arg Assign Call BoolOp If Compare Assign Assign Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "CitationDefinitionTransform", + "source_code": "class CitationDefinitionTransform(SphinxTransform):\n default_priority = 619\n\n def apply(self, **kwargs: Any) -> None:\n domain = self.env.domains.citation_domain\n for node in self.document.findall(nodes.citation):\n node['docname'] = self.env.docname\n domain.note_citation(node)\n label = cast('nodes.label', node[0])\n label['support_smartquotes'] = False", + "docstring": "Mark citation definition labels as not smartquoted.", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\citation.py", + "ast_data": "ClassDef name:CitationDefinitionTransform Assign FunctionDef name:apply arg:self arguments arg arg Assign For Call Assign Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "from_config", + "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n _check_config_keys(config, cls._fields)\n kwargs = _standardize_and_copy_config(config)\n kwargs['dtype'] = dtypes.as_dtype(config['dtype'])\n return cls(**kwargs)", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "load_from_saved_model", + "source_code": "def load_from_saved_model(saved_model_path, custom_objects=None):\n 
warnings.warn('`tf.keras.experimental.load_from_saved_model` is deprecatedand will be removed in a future version. Please switch to `tf.keras.models.load_model`.')\n model_json_filepath = os.path.join(compat.as_bytes(saved_model_path), compat.as_bytes(constants.ASSETS_DIRECTORY), compat.as_bytes(SAVED_MODEL_FILENAME_JSON))\n with gfile.Open(model_json_filepath, 'r') as f:\n model_json = f.read()\n model = model_config.model_from_json(model_json, custom_objects=custom_objects)\n checkpoint_prefix = os.path.join(compat.as_text(saved_model_path), compat.as_text(constants.VARIABLES_DIRECTORY), compat.as_text(constants.VARIABLES_FILENAME))\n model.load_weights(checkpoint_prefix)\n return model", + "docstring": "Loads a keras Model from a SavedModel created by . This function reinstantiates model state by: 1) loading model topology from json (this will eventually come from metagraph). 2) loading model weights from checkpoint. Example: Args: saved_model_path: a string specifying the path to an existing SavedModel. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: a keras.Model instance.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py", + "ast_data": "FunctionDef name:load_from_saved_model arg:saved_model_path arg:custom_objects arguments arg arg Call Assign Call Call Call Call With Call Assign Call Assign Call Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "BBOverlapsLookup", + "source_code": "@BaseSpatialField.register_lookup\nclass BBOverlapsLookup(GISLookup):\n lookup_name = 'bboverlaps'", + "docstring": "The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py", + "ast_data": "ClassDef name:BBOverlapsLookup Assign" + }, + { + "library": "django", + "name": "url", + "source_code": "def url(self, name, force=False):\n return self._url(self.stored_name, name, force)", + "docstring": "Return the non-hashed URL in DEBUG mode.", + "type": "method", + "file_path": "django\\django\\contrib\\staticfiles\\storage.py", + "ast_data": "FunctionDef name:url arg:self arg:name arg:force arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "param_shapes", + "source_code": "@classmethod\ndef param_shapes(cls, sample_shape, name='DistributionParamShapes'):\n with ops.name_scope(name, values=[sample_shape]):\n return cls._param_shapes(sample_shape)", + "docstring": "Shapes of parameters given the desired shape of a call to . This is a class method that describes what key/value arguments are required to instantiate the given so that a particular shape is returned for that instance's call to . Subclasses should override class method . Args: sample_shape: or python list/tuple. Desired shape of a call to . name: name to prepend ops with. 
Returns: of parameter name to shapes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", + "ast_data": "FunctionDef name:param_shapes arg:cls arg:sample_shape arg:name arguments arg arg arg With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, node_def, op, message, *args):\n super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND, *args)", + "docstring": "Creates a .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n raise NotImplementedError", + "docstring": "The name of this variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Raise" + }, + { + "library": "matplotlib", + "name": "get_xbound", + "source_code": "def get_xbound(self):\n left, right = self.get_xlim()\n if left < right:\n return (left, right)\n else:\n return (right, left)", + "docstring": "Return the lower and upper x-axis bounds, in increasing order. See Also -------- set_xbound get_xlim, set_xlim invert_xaxis, xaxis_inverted", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:get_xbound arg:self arguments arg Assign Call If Compare Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "_check_field_name", + "source_code": "def _check_field_name(self):\n if self.name is None:\n return []\n if self.name.endswith('_'):\n return [checks.Error('Field names must not end with an underscore.', obj=self, id='fields.E001')]\n elif LOOKUP_SEP in self.name:\n return [checks.Error('Field names must not contain \"%s\".' % LOOKUP_SEP, obj=self, id='fields.E002')]\n elif self.name == 'pk':\n return [checks.Error(\"'pk' is a reserved word that cannot be used as a field name.\", obj=self, id='fields.E003')]\n else:\n return []", + "docstring": "Check if field name is valid, i.e. 
1) does not end with an underscore, 2) does not contain \"__\" and 3) is not \"pk\".", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:_check_field_name arg:self arguments arg If Compare Return return:no If Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:no" + }, + { + "library": "pytorch", + "name": "is_tf32_supported", + "source_code": "def is_tf32_supported() -> bool:\n if torch.version.hip:\n prop_name = torch.cuda.get_device_properties().gcnArchName\n archs = ('gfx94', 'gfx95')\n for arch in archs:\n if arch in prop_name:\n return True\n return False\n return is_bf16_supported(including_emulation=False)", + "docstring": "Return a bool indicating if the current CUDA/ROCm device supports dtype tf32.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:is_tf32_supported arguments If Assign Call Assign For If Compare Return return:yes Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "track_variable", + "source_code": "def track_variable(v):\n if context.executing_eagerly():\n return\n graph = v.graph if hasattr(v, 'graph') else get_graph()\n _GRAPH_VARIABLES[graph].add(v)", + "docstring": "Tracks the given variable for initialization.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:track_variable arg:v arguments arg If Call Return return:no Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "divide", + "source_code": "@tf_export('math.divide', 'divide')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef divide(x, y, name=None):\n if name is not None:\n return DivideDelegateWithName(x, name) / y\n else:\n if not tensor_util.is_tf_type(x):\n dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None\n x = ops.convert_to_tensor(x, dtype=dtype)\n return x / y", + "docstring": "Computes Python style division of by . For example: >>> x = tf.constant([16, 12, 11]) >>> y = tf.constant([4, 6, 2]) >>> tf.divide(x,y) Args: x: A y: A name: A name for the operation (optional). Returns: A with same shape as input", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:divide arg:x arg:y arg:name arguments arg arg arg If Compare Return return:yes Call If Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "serialize_to_string", + "source_code": "def serialize_to_string(self):\n return print_mdl.SerializeToString()", + "docstring": "Serialize the ProfileProto to a binary string. Users can write it to file for offline analysis by tfprof commandline or graphical interface. Returns: ProfileProto binary string.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\model_analyzer.py", + "ast_data": "FunctionDef name:serialize_to_string arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "validate_autopk_value", + "source_code": "def validate_autopk_value(self, value):\n return value", + "docstring": "Certain backends do not accept some values for \"serial\" fields (for example zero in MySQL). 
Raise a ValueError if the value is invalid, otherwise return the validated value.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:validate_autopk_value arg:self arg:value arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "single_method_decorator", + "source_code": "def single_method_decorator(f):\n\n @parameterized.named_parameters(*params)\n @functools.wraps(f)\n def decorated(self, saved_format, *args, **kwargs):\n if saved_format == 'h5':\n _test_h5_saved_model_format(f, self, *args, **kwargs)\n elif saved_format == 'tf':\n _test_tf_saved_model_format(f, self, *args, **kwargs)\n elif saved_format == 'tf_no_traces':\n _test_tf_saved_model_format_no_traces(f, self, *args, **kwargs)\n else:\n raise ValueError('Unknown model type: %s' % (saved_format,))\n return decorated", + "docstring": "Decorator that constructs the test cases.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py", + "ast_data": "FunctionDef name:single_method_decorator arg:f arguments arg FunctionDef name:decorated arg:self arg:saved_format arguments arg arg arg arg If Compare Call If Compare Call If Compare Call Raise Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_make_execution_function", + "source_code": "def _make_execution_function(model, mode, class_weight=None):\n if mode == ModeKeys.TRAIN:\n f = functools.partial(model.train_on_batch, class_weight=class_weight)\n elif mode == ModeKeys.TEST:\n f = model.test_on_batch\n else:\n\n def predict_on_batch(x, y=None, sample_weights=None):\n return model.predict_on_batch(x)\n f = predict_on_batch\n if mode != ModeKeys.PREDICT:\n f = functools.partial(f, reset_metrics=False)\n return f", + "docstring": "Makes function to run one step of model execution.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_generator_v1.py", + "ast_data": "FunctionDef name:_make_execution_function arg:model arg:mode arg:class_weight arguments arg arg arg If Compare Assign Call If Compare Assign FunctionDef name:predict_on_batch arg:x arg:y arg:sample_weights arguments arg arg arg Return return:yes Call Assign If Compare Assign Call Return return:yes" + }, + { + "library": "django", + "name": "__setitem__", + "source_code": "def __setitem__(self, key, value):\n self.dicts[-1][key] = value", + "docstring": "Set a variable in the current context", + "type": "method", + "file_path": "django\\django\\template\\context.py", + "ast_data": "FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign" + }, + { + "library": "pytorch", + "name": "pack_padded_sequence", + "source_code": "def pack_padded_sequence(input: Tensor, lengths: Union[Tensor, list[int]], batch_first: bool=False, enforce_sorted: bool=True) -> PackedSequence:\n if not isinstance(lengths, torch.Tensor):\n if torch._C._get_tracing_state():\n warnings.warn('pack_padded_sequence has been called with a Python list of sequence lengths. 
The tracer cannot track the data flow of Python values, and it will treat them as constants, likely rendering the trace incorrect for any other combination of lengths.', stacklevel=2)\n lengths = torch.as_tensor(lengths, dtype=torch.int64, device='cpu')\n else:\n lengths = lengths.to(dtype=torch.int64)\n if enforce_sorted:\n sorted_indices = None\n else:\n lengths, sorted_indices = torch.sort(lengths, descending=True)\n sorted_indices = sorted_indices.to(input.device)\n batch_dim = 0 if batch_first else 1\n input = input.index_select(batch_dim, sorted_indices)\n data, batch_sizes = _VF._pack_padded_sequence(input, lengths, batch_first)\n return _packed_sequence_init(data, batch_sizes, sorted_indices, None)", + "docstring": "Packs a Tensor containing padded sequences of variable length. :attr: can be of size `batch_firstbatch_firstenforce_sorted = Falseenforce_sortedenforce_sorted = Truepad_packed_sequencepad_packed_sequencePackedSequencePackedSequencePackedSequence` object", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\rnn.py", + "ast_data": "FunctionDef name:pack_padded_sequence arg:input arg:lengths arg:batch_first arg:enforce_sorted arguments arg arg arg arg If Call If Call Call Assign Call Assign Call If Assign Assign Call Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "bazel_command", + "source_code": "def bazel_command(self, subcommand: str='test', extra_options: Tuple[str, ...]=()) -> List[str]:\n options = _dict_to_cli_options(self.options)\n configs = [f'--config={config}' for config in self.configs]\n build_tag_filters = f'--build_tag_filters={','.join(self.build_tag_filters)}'\n test_tag_filters = f'--test_tag_filters={','.join(self.test_tag_filters)}'\n action_env = [f'--action_env={k}={v}' for k, v in self.action_env.items()]\n test_env = [f'--test_env={k}={v}' for k, v in self.test_env.items()]\n repo_env = [f'--repo_env={k}={v}' for k, v in self.repo_env.items()]\n override_repository = [f'--override_repository={k}={v}' for k, v in self.override_repository.items()]\n tag_filters = [build_tag_filters, test_tag_filters]\n all_options = tag_filters + configs + action_env + test_env + repo_env + override_repository + options + list(extra_options)\n return ['bazel', subcommand, *all_options, '--', *self.target_patterns]", + "docstring": "Returns a bazel test command for this build. Args: subcommand: The subcommand to give to bazel. by default. extra_options: Extra options. For now just used to pass in . 
Returns: List of command line arguments", + "type": "method", + "file_path": "tensorflow\\third_party\\xla\\build_tools\\ci\\build.py", + "ast_data": "FunctionDef name:bazel_command arg:self arg:subcommand arg:extra_options arguments arg arg arg Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "LayoutEngine", + "source_code": "class LayoutEngine:\n _adjust_compatible = None\n _colorbar_gridspec = None\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._params = {}\n\n def set(self, **kwargs):\n raise NotImplementedError\n\n @property\n def colorbar_gridspec(self):\n if self._colorbar_gridspec is None:\n raise NotImplementedError\n return self._colorbar_gridspec\n\n @property\n def adjust_compatible(self):\n if self._adjust_compatible is None:\n raise NotImplementedError\n return self._adjust_compatible\n\n def get(self):\n return dict(self._params)\n\n def execute(self, fig):\n raise NotImplementedError", + "docstring": "Base class for Matplotlib layout engines. A layout engine can be passed to a figure at instantiation or at any time with . Once attached to a figure, the layout engine `~.figure.Figure.draw~.figure.Figure.set_layout_engineLayoutEngine.Figure.colorbar.colorbar.make_axes_gridspec.colorbar.make_axes.Figure.subplots_adjustLayoutEngineLayoutEngine.setLayoutEngine.execute` with your implementation", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py", + "ast_data": "ClassDef name:LayoutEngine Assign Assign FunctionDef name:__init__ arg:self arguments arg arg Call Call Assign FunctionDef name:set arg:self arguments arg arg Raise FunctionDef name:colorbar_gridspec arg:self arguments arg If Compare Raise Return return:yes FunctionDef name:adjust_compatible arg:self arguments arg If Compare Raise Return return:yes FunctionDef name:get arg:self arguments arg Return return:yes Call FunctionDef name:execute arg:self arg:fig arguments arg arg Raise" + }, + { + "library": "pytorch", + "name": "LPPool1d", + "source_code": "class LPPool1d(_LPPoolNd):\n kernel_size: _size_1_t\n stride: _size_1_t\n\n def forward(self, input: Tensor) -> Tensor:\n return F.lp_pool1d(input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode)", + "docstring": "Applies a 1D power-average pooling over an input signal composed of several input planes. On each window, the function computed is: .. math:: f(X) = \\sqrt[p]{\\sum_{x \\in X} x^{p}} - At p = :math:, one gets Max Pooling - At p = 1, one gets Sum Pooling (which is proportional to Average Pooling) .. note:: If the sum to the power of is zero, the gradient of this function is not defined. This implementation will set the gradient to zero in this case. Args: kernel_size: a single int, the size of the window stride: a single int, the stride of the window. Default value is :attr: ceil_mode: when True, will use instead of to compute the output shape Shape: - Input: :math: or :math:. - Output: :math: or :math:, where .. math:: L_{out} = \\left\\lfloor\\frac{L_{in} - \\text{kernel\\_size}}{\\text{stride}} + 1\\right\\rfloor Examples:: >>> # power-2 pool of window of length 3, with stride 2. 
>>> m = nn.LPPool1d(2, 3, stride=2) >>> input = torch.randn(20, 16, 50) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\pooling.py", + "ast_data": "ClassDef name:LPPool1d FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_transform", + "source_code": "def _transform(self, X):\n return euclidean_distances(X, self.cluster_centers_)", + "docstring": "Guts of transform method; no input validation.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py", + "ast_data": "FunctionDef name:_transform arg:self arg:X arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "Message", + "source_code": "class Message:\n\n def __init__(self, level, message, extra_tags=None):\n self.level = int(level)\n self.message = message\n self.extra_tags = extra_tags\n\n def _prepare(self):\n self.message = str(self.message)\n self.extra_tags = str(self.extra_tags) if self.extra_tags is not None else None\n\n def __eq__(self, other):\n if not isinstance(other, Message):\n return NotImplemented\n return self.level == other.level and self.message == other.message\n\n def __str__(self):\n return str(self.message)\n\n def __repr__(self):\n extra_tags = f', extra_tags={self.extra_tags!r}' if self.extra_tags else ''\n return f'Message(level={self.level}, message={self.message!r}{extra_tags})'\n\n @property\n def tags(self):\n return ' '.join((tag for tag in [self.extra_tags, self.level_tag] if tag))\n\n @property\n def level_tag(self):\n return LEVEL_TAGS.get(self.level, '')", + "docstring": "Represent an actual message that can be stored in any of the supported storage classes (typically session- or cookie-based) and rendered in a view or template.", + "type": "class", + "file_path": "django\\django\\contrib\\messages\\storage\\base.py", + "ast_data": "ClassDef name:Message FunctionDef name:__init__ arg:self arg:level arg:message arg:extra_tags arguments arg arg arg arg Assign Call Assign Assign FunctionDef name:_prepare arg:self arguments arg Assign Call Assign Compare Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes BoolOp Compare Compare FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes FunctionDef name:tags arg:self arguments arg Return return:yes Call FunctionDef name:level_tag arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "read_parquet", + "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef read_parquet(path: FilePath | ReadBuffer[bytes], engine: str='auto', columns: list[str] | None=None, storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, filesystem: Any=None, filters: list[tuple] | list[list[tuple]] | None=None, to_pandas_kwargs: dict | None=None, **kwargs) -> DataFrame:\n impl = get_engine(engine)\n check_dtype_backend(dtype_backend)\n return impl.read(path, columns=columns, filters=filters, storage_options=storage_options, dtype_backend=dtype_backend, filesystem=filesystem, to_pandas_kwargs=to_pandas_kwargs, **kwargs)", + "docstring": "Load a parquet object from the file path, returning a DataFrame. The function automatically handles reading the data from a parquet file and creates a DataFrame with the appropriate structure. 
Parameters ---------- path : str, path object or file-like object String, path object (implementing `DataFrameDataFrameArrowDtypeDataFramekwargsfilterspyarrowenginefilterspyarrow` engine, which can benefit from multithreading and also potentially be more economical in terms of memory. >>> sel = [(\"foo\", \">\", 2)] >>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel) >>> restored_part foo bar 0 3 8 1 4 9", + "type": "function", + "file_path": "pandas\\pandas\\io\\parquet.py", + "ast_data": "FunctionDef name:read_parquet arg:path arg:engine arg:columns arg:storage_options arg:dtype_backend arg:filesystem arg:filters arg:to_pandas_kwargs arguments arg arg arg arg arg arg arg arg arg Assign Call Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_ensure_dtype_type", + "source_code": "def _ensure_dtype_type(value, dtype: np.dtype):\n if dtype == _dtype_obj:\n return value\n return dtype.type(value)", + "docstring": "Ensure that the given value is an instance of the given dtype. e.g. if out dtype is np.complex64_, we should have an instance of that as opposed to a python complex object. Parameters ---------- value : object dtype : np.dtype Returns ------- object", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\cast.py", + "ast_data": "FunctionDef name:_ensure_dtype_type arg:value arg:dtype arguments arg arg If Compare Return return:yes Return return:yes Call" + }, + { + "library": "pandas", + "name": "argmax", + "source_code": "def argmax(self, skipna: bool=True) -> int:\n validate_bool_kwarg(skipna, 'skipna')\n if not skipna and self._hasna:\n raise ValueError('Encountered an NA value with skipna=False')\n return nargminmax(self, 'argmax')", + "docstring": "Return the index of maximum value. In case of multiple occurrences of the maximum value, the index corresponding to the first occurrence is returned. Parameters ---------- skipna : bool, default True Returns ------- int See Also -------- ExtensionArray.argmin : Return the index of the minimum value. Examples -------- >>> arr = pd.array([3, 1, 2, 5, 4]) >>> arr.argmax() np.int64(3)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:argmax arg:self arg:skipna arguments arg arg Call If BoolOp Raise Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_accessed_time", + "source_code": "def get_accessed_time(self, name):\n raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method')", + "docstring": "Return the last accessed time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True.", + "type": "method", + "file_path": "django\\django\\core\\files\\storage\\base.py", + "ast_data": "FunctionDef name:get_accessed_time arg:self arg:name arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "hint_int", + "source_code": "def hint_int(a: Union[torch.SymInt, int], fallback: Optional[int]=None) -> int:\n if isinstance(a, torch.SymInt):\n return a.node.require_hint(fallback)\n assert type(a) is int, a\n return a", + "docstring": "Retrieve the hint for an int (based on the underlying real values as observed at runtime). 
If no hint is available (e.g., because data dependent shapes), if fallback is not None, use that instead (otherwise raise an error).", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:hint_int arg:a arg:fallback arguments arg arg If Call Return return:yes Call Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "register_command_handler", + "source_code": "def register_command_handler(self, prefix, handler, help_info, prefix_aliases=None):\n if not prefix:\n raise ValueError('Empty command prefix')\n if prefix in self._handlers:\n raise ValueError('A handler is already registered for command prefix \"%s\"' % prefix)\n if not callable(handler):\n raise ValueError('handler is not callable')\n if not isinstance(help_info, str):\n raise ValueError('help_info is not a str')\n if prefix_aliases:\n for alias in prefix_aliases:\n if self._resolve_prefix(alias):\n raise ValueError('The prefix alias \"%s\" clashes with existing prefixes or aliases.' % alias)\n self._alias_to_prefix[alias] = prefix\n self._prefix_to_aliases[prefix] = prefix_aliases\n self._handlers[prefix] = handler\n self._prefix_to_help[prefix] = help_info", + "docstring": "Register a callable as a command handler. Args: prefix: Command prefix, i.e., the first word in a command, e.g., \"print\" as in \"print tensor_1\". handler: A callable of the following signature: foo_handler(argv, screen_info=None), where argv is the argument vector (excluding the command prefix) and screen_info is a dictionary containing information about the screen, such as number of columns, e.g., {\"cols\": 100}. The callable should return: 1) a RichTextLines object representing the screen output. The callable can also raise an exception of the type CommandLineExit, which if caught by the command-line interface, will lead to its exit. The exception can optionally carry an exit token of arbitrary type. help_info: A help string. prefix_aliases: Aliases for the command prefix, as a list of str. E.g., shorthands for the command prefix: [\"p\", \"pr\"] Raises: ValueError: If 1) the prefix is empty, or 2) handler is not callable, or 3) a handler is already registered for the prefix, or 4) elements in prefix_aliases clash with existing aliases. 5) help_info is not a str.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:register_command_handler arg:self arg:prefix arg:handler arg:help_info arg:prefix_aliases arguments arg arg arg arg arg If Raise Call If Compare Raise Call If Call Raise Call If Call Raise Call If For If Call Raise Call Assign Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "def predict_proba(self, X):\n check_is_fitted(self)\n if self.n_classes_ > 2 and self.multi_class == 'one_vs_one':\n raise ValueError('one_vs_one multi-class mode does not support predicting probability estimates. Use one_vs_rest mode instead.')\n if self.kernel is None or self.kernel.requires_vector_input:\n X = validate_data(self, X, ensure_2d=True, dtype='numeric', reset=False)\n else:\n X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)\n return self.base_estimator_.predict_proba(X)", + "docstring": "Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. 
Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call If BoolOp Compare Compare Raise Call If BoolOp Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "result_ilocs", + "source_code": "@final\n@cache_readonly\ndef result_ilocs(self) -> npt.NDArray[np.intp]:\n ids = self.ids\n if self.has_dropped_na:\n mask = np.where(ids >= 0)\n null_gaps = np.cumsum(ids == -1)[mask]\n ids = ids[mask]\n result = get_group_index_sorter(ids, self.ngroups)\n if self.has_dropped_na:\n result += np.take(null_gaps, result)\n return result", + "docstring": "Get the original integer locations of result_index in the input.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\ops.py", + "ast_data": "FunctionDef name:result_ilocs arg:self arguments arg Assign If Assign Call Compare Assign Call Compare Assign Assign Call If Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "add_to_optionally_restored", + "source_code": "def add_to_optionally_restored(self, var):\n self._optionally_restored.append(var)", + "docstring": "Add a variable to the list of optionally restored variables. There are situations where certain variables should be ignored in assertions such as assert_existing_objects_matched(). One example is that of a checkpoint saved with train.Saver(), and restored with train.Checkpoint(): it is possible for the train.Saver() checkpoint to be missing the internal variable, which we want to ignore on restore. 
Args: var: The variable to treat as optionally restored.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:add_to_optionally_restored arg:self arg:var arguments arg arg Call" + }, + { + "library": "django", + "name": "RequestAborted", + "source_code": "class RequestAborted(Exception):\n pass", + "docstring": "The request was closed before it was completed, or timed out.", + "type": "class", + "file_path": "django\\django\\core\\exceptions.py", + "ast_data": "ClassDef name:RequestAborted" + }, + { + "library": "pytorch", + "name": "CUDASanitizerErrors", + "source_code": "class CUDASanitizerErrors(Exception):\n\n def __init__(self, errors: list[SynchronizationError]):\n self.errors = errors\n\n def __str__(self):\n return f'detected {len(self.errors)} errors'", + "docstring": "Wrapper class for errors reported by CUDA Sanitizer.", + "type": "class", + "file_path": "pytorch\\torch\\cuda\\_sanitizer.py", + "ast_data": "ClassDef name:CUDASanitizerErrors FunctionDef name:__init__ arg:self arg:errors arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "transform_index_select", + "source_code": "@register_transformation_rule(IndexSelect)\ndef transform_index_select(constraint, counter):\n dims, counter = gen_tensor_dims(constraint.tensor_size, counter)\n is_valid_index = valid_index(constraint.index, dims)\n nat_constraints = gen_nat_constraints(dims)\n if is_valid_index == T():\n new_dims = copy.deepcopy(dims)\n new_dims[constraint.index] = constraint.dim_replace\n transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq), *nat_constraints, is_valid_index, BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])\n return (transformed_constraint, counter)", + "docstring": "The constraints consider the given tensor size, checks if the index is valid and if so, generates a constraint for replacing the input dimension with the required dimension", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", + "ast_data": "FunctionDef name:transform_index_select arg:constraint arg:counter arguments arg arg Assign Call Assign Call Assign Call If Compare Call Assign Call Assign Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "split", + "source_code": "@tf_should_use.should_use_result\ndef split(self, value, lengths, name=None):\n return self._implementation.split(value, lengths, name=name)", + "docstring": "Split the values of a into the TensorArray. Args: value: (N+1)-D. Tensor of type . The Tensor to split. lengths: 1-D. int32 vector with the lengths to use when splitting along its first dimension. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the split occurs. Use this object for all subsequent operations. 
Raises: ValueError: if the shape inference fails.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:split arg:self arg:value arg:lengths arg:name arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_unbatch", + "source_code": "@abc.abstractmethod\ndef _unbatch(self) -> TypeSpec:\n raise NotImplementedError(f'{type(self).__name__}._unbatch')", + "docstring": "Returns a TypeSpec representing a single element this TypeSpec. Returns: A representing a single element of objects with this TypeSpec.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:_unbatch arg:self arguments arg Raise Call Call" + }, + { + "library": "scipy", + "name": "rvs", + "source_code": "def rvs(self, alpha, size=1, random_state=None):\n alpha = _dirichlet_check_parameters(alpha)\n random_state = self._get_random_state(random_state)\n return random_state.dirichlet(alpha, size=size)", + "docstring": "Draw random samples from a Dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s size : int, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (, ), where is the dimension of the random variable.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:rvs arg:self arg:alpha arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "codes", + "source_code": "@property\ndef codes(self) -> FrozenList:\n return self._codes", + "docstring": "Codes of the MultiIndex. Codes are the position of the index value in the list of level values for each level. Returns ------- tuple of numpy.ndarray The codes of the MultiIndex. Each array in the tuple corresponds to a level in the MultiIndex. See Also -------- MultiIndex.set_codes : Set new codes on MultiIndex. Examples -------- >>> arrays = [[1, 1, 2, 2], [\"red\", \"blue\", \"red\", \"blue\"]] >>> mi = pd.MultiIndex.from_arrays(arrays, names=(\"number\", \"color\")) >>> mi.codes FrozenList([[0, 0, 1, 1], [1, 0, 1, 0]])", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:codes arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_str_bbox", + "source_code": "def get_str_bbox(self, s):\n return self.get_str_bbox_and_descent(s)[:4]", + "docstring": "Return the string bounding box.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", + "ast_data": "FunctionDef name:get_str_bbox arg:self arg:s arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_item", + "source_code": "def get_item(target, i, opts):\n assert isinstance(opts, GetItemOpts)\n if isinstance(target, tensor_array_ops.TensorArray):\n return _tf_tensorarray_get_item(target, i)\n elif tensor_util.is_tf_type(target):\n if target.dtype == dtypes.variant:\n return _tf_tensor_list_get_item(target, i, opts)\n elif target.dtype == dtypes.string and target.shape.ndims == 0:\n return _tf_tensor_string_get_item(target, i)\n else:\n return _tf_tensor_get_item(target, i)\n else:\n return _py_get_item(target, i)", + "docstring": "The slice read operator (i.e. __getitem__). Note: it is unspecified whether target will be mutated or not. 
In general, if target is mutable (like Python lists), it will be mutated. Args: target: An entity that supports getitem semantics. i: Index to read from. opts: A GetItemOpts object. Returns: The read element. Raises: ValueError: if target is not of a supported type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py", + "ast_data": "FunctionDef name:get_item arg:target arg:i arg:opts arguments arg arg arg Call If Call Return return:yes Call If Call If Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "dot", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef dot(x, y):\n if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):\n x_shape = []\n for i, s in zip(int_shape(x), array_ops_stack.unstack(array_ops.shape(x))):\n if i is not None:\n x_shape.append(i)\n else:\n x_shape.append(s)\n x_shape = tuple(x_shape)\n y_shape = []\n for i, s in zip(int_shape(y), array_ops_stack.unstack(array_ops.shape(y))):\n if i is not None:\n y_shape.append(i)\n else:\n y_shape.append(s)\n y_shape = tuple(y_shape)\n y_permute_dim = list(range(ndim(y)))\n y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim\n xt = array_ops.reshape(x, [-1, x_shape[-1]])\n yt = array_ops.reshape(array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])\n return array_ops.reshape(math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])\n if is_sparse(x):\n out = sparse_ops.sparse_tensor_dense_matmul(x, y)\n else:\n out = math_ops.matmul(x, y)\n return out", + "docstring": "Multiplies 2 tensors (and/or variables) and returns a tensor. This operation corresponds to . Args: x: Tensor or variable. y: Tensor or variable. Returns: A tensor, dot product of and . Examples: If inputs and are 2-D arrays, then it is equivalent to . >>> x = tf.keras.backend.placeholder(shape=(2, 3)) >>> y = tf.keras.backend.placeholder(shape=(3, 4)) >>> xy = tf.keras.backend.dot(x, y) >>> xy >>> x = tf.keras.backend.placeholder(shape=(32, 28, 3)) >>> y = tf.keras.backend.placeholder(shape=(3, 4)) >>> xy = tf.keras.backend.dot(x, y) >>> xy If is an N-D array and is an M-D array (where M>=2), it is a sum product over the last axis of and the second-to-last axis of . 
>>> x = tf.keras.backend.random_uniform_variable(shape=(2, 3), low=0, high=1) >>> y = tf.keras.backend.ones((4, 3, 5)) >>> xy = tf.keras.backend.dot(x, y) >>> tf.keras.backend.int_shape(xy) (2, 4, 5)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:dot arg:x arg:y arguments arg arg If BoolOp Compare Call BoolOp Compare Call Compare Call Assign For Call Call Call Call If Compare Call Call Assign Call Assign For Call Call Call Call If Compare Call Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Call Return return:yes Call Call If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "__call__", + "source_code": "def __call__(self, text, trim_url_limit=None, nofollow=False, autoescape=False):\n safe_input = isinstance(text, SafeData)\n words = self.word_split_re.split(str(text))\n local_cache = {}\n urlized_words = []\n for word in words:\n if (urlized_word := local_cache.get(word)) is None:\n urlized_word = self.handle_word(word, safe_input=safe_input, trim_url_limit=trim_url_limit, nofollow=nofollow, autoescape=autoescape)\n local_cache[word] = urlized_word\n urlized_words.append(urlized_word)\n return ''.join(urlized_words)", + "docstring": "If trim_url_limit is not None, truncate the URLs in the link text longer than this limit to trim_url_limit - 1 characters and append an ellipsis. If nofollow is True, give the links a rel=\"nofollow\" attribute. If autoescape is True, autoescape the link text and URLs.", + "type": "method", + "file_path": "django\\django\\utils\\html.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:text arg:trim_url_limit arg:nofollow arg:autoescape arguments arg arg arg arg arg Assign Call Assign Call Call Assign Assign For If Compare Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "node_exists", + "source_code": "def node_exists(self, node_name, device_name=None):\n if not self._debug_graphs:\n raise LookupError('Nodes have not been loaded from partition graphs yet.')\n if device_name is not None and device_name not in self._debug_graphs:\n raise ValueError(\"The specified device_name '%s' cannot be found.\" % device_name)\n for _, debug_graph in self._debug_graphs.items():\n if node_name in debug_graph.node_inputs:\n return True\n return False", + "docstring": "Test if a node exists in the partition graphs. Args: node_name: () name of the node to be checked. device_name: optional device name. If None, will search for the node on all available devices. Otherwise, search for the node only on the given device. Returns: A boolean indicating whether the node exists. Raises: LookupError: If no partition graphs have been loaded yet. 
ValueError: If device_name is specified but cannot be found.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:node_exists arg:self arg:node_name arg:device_name arguments arg arg arg If Raise Call If BoolOp Compare Compare Raise Call For Call If Compare Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@abstractmethod\ndef fit(self, X, y, **fit_params):\n names, clfs = self._validate_estimators()\n if self.weights is not None and len(self.weights) != len(self.estimators):\n raise ValueError(f'Number of `estimators` and weights must be equal; got {len(self.weights)} weights, {len(self.estimators)} estimators')\n if _routing_enabled():\n routed_params = process_routing(self, 'fit', **fit_params)\n else:\n routed_params = Bunch()\n for name in names:\n routed_params[name] = Bunch(fit={})\n if 'sample_weight' in fit_params:\n routed_params[name].fit['sample_weight'] = fit_params['sample_weight']\n self.estimators_ = Parallel(n_jobs=self.n_jobs)((delayed(_fit_single_estimator)(clone(clf), X, y, fit_params=routed_params[name]['fit'], message_clsname='Voting', message=self._log_message(name, idx + 1, len(clfs))) for idx, (name, clf) in enumerate(zip(names, clfs)) if clf != 'drop'))\n self.named_estimators_ = Bunch()\n est_iter = iter(self.estimators_)\n for name, est in self.estimators:\n current_est = est if est == 'drop' else next(est_iter)\n self.named_estimators_[name] = current_est\n if hasattr(current_est, 'feature_names_in_'):\n self.feature_names_in_ = current_est.feature_names_in_\n return self", + "docstring": "Get common fit operations.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Assign Call If BoolOp Compare Compare Call Call Raise Call Call Call If Call Assign Call Assign Call For Assign Call If Compare Assign Assign Call Call Call Call Call Call Call Call Call Compare Assign Call Assign Call For Assign Compare Call Assign If Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "slice_inputs", + "source_code": "def slice_inputs(self, indices_dataset, inputs):\n dataset = dataset_ops.DatasetV2.zip((indices_dataset, dataset_ops.DatasetV2.from_tensors(inputs).repeat()))\n\n def grab_batch(i, data):\n return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)\n dataset = dataset.map(grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)\n options = options_lib.Options()\n options.experimental_optimization.apply_default_optimizations = False\n if self._shuffle:\n options.experimental_external_state_policy = options_lib.ExternalStatePolicy.IGNORE\n dataset = dataset.with_options(options)\n return dataset", + "docstring": "Slice inputs into a Dataset of batches. Given a Dataset of batch indices and the unsliced inputs, this step slices the inputs in a parallelized fashion and produces a dataset of input batches. Args: indices_dataset: A Dataset of batched indices inputs: A python data structure that contains the inputs, targets, and possibly sample weights. 
Returns: A Dataset of input batches matching the batch indices.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:slice_inputs arg:self arg:indices_dataset arg:inputs arguments arg arg arg Assign Call Call Call FunctionDef name:grab_batch arg:i arg:data arguments arg arg Return return:yes Call arguments arg Call Assign Call Assign Call Assign If Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_session_tensor", + "source_code": "@tf_export(v1=['get_session_tensor'])\ndef get_session_tensor(handle, dtype, name=None):\n handle_device = TensorHandle._get_device_name(handle)\n with ops.device(handle_device):\n holder = array_ops.placeholder(dtypes.string)\n _register_handle_feeder(holder.graph, holder, dtype)\n tensor = gen_data_flow_ops.get_session_tensor(holder, dtype, name=name)\n return (holder, tensor)", + "docstring": "Get the tensor of type by feeding a tensor handle. This is EXPERIMENTAL and subject to change. Get the value of the tensor from a tensor handle. The tensor is produced in a previous run() and stored in the state of the session. Args: handle: The string representation of a persistent tensor handle. dtype: The type of the output tensor. name: Optional name prefix for the return tensor. Returns: A pair of tensors. The first is a placeholder for feeding a tensor handle and the second is the tensor in the session state keyed by the tensor handle. Example:", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py", + "ast_data": "FunctionDef name:get_session_tensor arg:handle arg:dtype arg:name arguments arg arg arg Assign Call With Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "AugmentationBase2D", + "source_code": "class AugmentationBase2D(_AugmentationBase):\n\n def validate_tensor(self, input: Tensor) -> None:\n _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n if len(input.shape) != 4:\n raise RuntimeError(f'Expect (B, C, H, W). Got {input.shape}.')\n\n def transform_tensor(self, input: Tensor, *, shape: Optional[Tensor]=None, match_channel: bool=True) -> Tensor:\n _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n if shape is None:\n return _transform_input(input)\n else:\n return _transform_input_by_shape(input, reference_shape=shape, match_channel=match_channel)", + "docstring": "AugmentationBase2D base class for customized augmentation implementations. AugmentationBase2D aims at offering a generic base class for a greater level of customization. If the subclass contains routined matrix-based transformations, might be a better fit. Args: p: probability for applying an augmentation. This param controls the augmentation probabilities element-wise for a batch. p_batch: probability for applying an augmentation to a batch. This param controls the augmentation probabilities batch-wise. same_on_batch: apply the same transformation across the batch. 
keepdim: whether to keep the output shape the same as input ``.", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\_2d\\base.py", + "ast_data": "ClassDef name:AugmentationBase2D FunctionDef name:validate_tensor arg:self arg:input arguments arg arg Call If Compare Call Raise Call FunctionDef name:transform_tensor arg:self arg:input arguments arg arg arg arg Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_IndependentConstraint", + "source_code": "class _IndependentConstraint(Constraint):\n\n def __init__(self, base_constraint, reinterpreted_batch_ndims):\n assert isinstance(base_constraint, Constraint)\n assert isinstance(reinterpreted_batch_ndims, int)\n assert reinterpreted_batch_ndims >= 0\n self.base_constraint = base_constraint\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n super().__init__()\n\n @property\n def is_discrete(self) -> bool:\n return self.base_constraint.is_discrete\n\n @property\n def event_dim(self) -> int:\n return self.base_constraint.event_dim + self.reinterpreted_batch_ndims\n\n def check(self, value):\n result = self.base_constraint.check(value)\n if result.dim() < self.reinterpreted_batch_ndims:\n expected = self.base_constraint.event_dim + self.reinterpreted_batch_ndims\n raise ValueError(f'Expected value.dim() >= {expected} but got {value.dim()}')\n result = result.reshape(result.shape[:result.dim() - self.reinterpreted_batch_ndims] + (-1,))\n result = result.all(-1)\n return result\n\n def __repr__(self):\n return f'{self.__class__.__name__[1:]}({repr(self.base_constraint)}, {self.reinterpreted_batch_ndims})'", + "docstring": "Wraps a constraint by aggregating over `check`, so that an event is valid only if all its independent entries are valid.", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:_IndependentConstraint FunctionDef name:__init__ arg:self arg:base_constraint arg:reinterpreted_batch_ndims arguments arg arg arg Call Call Compare Assign Assign Call Call FunctionDef name:is_discrete arg:self arguments arg Return return:yes FunctionDef name:event_dim arg:self arguments arg Return return:yes FunctionDef name:check arg:self arg:value arguments arg arg Assign Call If Compare Call Assign Raise Call Call Assign Call Call Assign Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "losses", + "source_code": "@property\ndef losses(self):\n collected_losses = []\n for layer in self._flatten_layers():\n if layer._eager_losses:\n if layer._eager_losses[0] is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER:\n collected_losses.extend(layer._eager_losses)\n else:\n collected_losses.extend(layer._losses)\n for regularizer in layer._callable_losses:\n loss_tensor = regularizer()\n if loss_tensor is not None:\n collected_losses.append(loss_tensor)\n return collected_losses", + "docstring": "List of losses added using the API. Variable regularization tensors are created when this property is accessed, so it is eager safe: accessing under a will propagate gradients back to the corresponding variables. Examples: >>> class MyLayer(tf.keras.layers.Layer): ... def call(self, inputs): ... self.add_loss(tf.abs(tf.reduce_mean(inputs))) ... 
return inputs >>> l = MyLayer() >>> l(np.ones((10, 1))) >>> l.losses [1.0] >>> inputs = tf.keras.Input(shape=(10,)) >>> x = tf.keras.layers.Dense(10)(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Activity regularization. >>> len(model.losses) 0 >>> model.add_loss(tf.abs(tf.reduce_mean(x))) >>> len(model.losses) 1 >>> inputs = tf.keras.Input(shape=(10,)) >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones') >>> x = d(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Weight regularization. >>> model.add_loss(lambda: tf.reduce_mean(d.kernel)) >>> model.losses [] Returns: A list of tensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:losses arg:self arguments arg Assign For Call If If Compare Call Call For Assign Call If Compare Call Return return:yes" + }, + { + "library": "scipy", + "name": "_call_nearest", + "source_code": "def _call_nearest(self, x_new):\n x_new_indices = searchsorted(self.x_bds, x_new, side=self._side)\n x_new_indices = x_new_indices.clip(0, len(self.x) - 1).astype(intp)\n y_new = self._y[x_new_indices]\n return y_new", + "docstring": "Find nearest neighbor interpolated y_new = f(x_new).", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", + "ast_data": "FunctionDef name:_call_nearest arg:self arg:x_new arguments arg arg Assign Call Assign Call Call Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "singular_leading_submatrix", + "source_code": "def singular_leading_submatrix(A, U, k):\n delta = np.sum(U[:k - 1, k - 1] ** 2) - A[k - 1, k - 1]\n n = len(A)\n v = np.zeros(n)\n v[k - 1] = 1\n if k != 1:\n v[:k - 1] = solve_triangular(U[:k - 1, :k - 1], -U[:k - 1, k - 1])\n return (delta, v)", + "docstring": "Compute term that makes the leading `A` is added to its element (k, k).", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_trustregion_exact.py", + "ast_data": "FunctionDef name:singular_leading_submatrix arg:A arg:U arg:k arguments arg arg arg Assign Call Assign Call Assign Call Assign If Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, c):\n self.c = c", + "docstring": ":param c: character or number", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:c arguments arg arg Assign" + }, + { + "library": "matplotlib", + "name": "_get_text_metrics_with_cache", + "source_code": "def _get_text_metrics_with_cache(renderer, text, fontprop, ismath, dpi):\n return _get_text_metrics_with_cache_impl(weakref.ref(renderer), text, fontprop.copy(), ismath, dpi)", + "docstring": "Call ``, caching the results.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:_get_text_metrics_with_cache arg:renderer arg:text arg:fontprop arg:ismath arg:dpi arguments arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "sphinx", + "name": "resolve_reference_detect_inventory", + "source_code": "def resolve_reference_detect_inventory(env: BuildEnvironment, node: pending_xref, contnode: TextElement) -> nodes.reference | None:\n resolve_self = env.config.intersphinx_resolve_self\n res = resolve_reference_any_inventory(env, True, node, contnode)\n if res is not None:\n return 
res\n target = node['reftarget']\n if ':' not in target:\n return None\n inv_name, _, new_target = target.partition(':')\n self_referential = bool(resolve_self) and resolve_self == inv_name\n if self_referential:\n node['reftarget'] = new_target\n node['intersphinx_self_referential'] = True\n return None\n if not inventory_exists(env, inv_name):\n return None\n node['reftarget'] = new_target\n res_inv = resolve_reference_in_inventory(env, inv_name, node, contnode)\n node['reftarget'] = target\n return res_inv", + "docstring": "Attempt to resolve a missing reference via intersphinx references. Resolution is tried first with the target as is in any inventory. If this does not succeed, then the target is split by the first `` is a named inventory, then resolution is tried in that inventory with the new target.", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py", + "ast_data": "FunctionDef name:resolve_reference_detect_inventory arg:env arg:node arg:contnode arguments arg arg arg Assign Assign Call If Compare Return return:yes Assign If Compare Return return:no Assign Call Assign BoolOp Call Compare If Assign Assign Return return:no If Call Return return:no Assign Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "zeros_like_impl", + "source_code": "@_tag_zeros_tensor\ndef zeros_like_impl(tensor, dtype, name, optimize=True, layout=None):\n with ops.name_scope(name, 'zeros_like', [tensor]) as name:\n return array_like_impl(zeros, gen_array_ops.zeros_like, tensor, dtype, name, optimize=optimize, layout=layout)", + "docstring": "Internal implementation for the v1/v2 zeros_like API calls.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:zeros_like_impl arg:tensor arg:dtype arg:name arg:optimize arg:layout arguments arg arg arg arg arg With Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "nodes_map", + "source_code": "def nodes_map(nodes: list[torch.fx.Node], node_call_back) -> list[torch.fx.Node]:\n for node in nodes:\n node_call_back(node)\n return nodes", + "docstring": "Sequentially visit the nodes list and invoke node_call_back on each element. Returns the nodes list after the node_call_back is invoked on each element.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\utils.py", + "ast_data": "FunctionDef name:nodes_map arg:nodes arg:node_call_back arguments arg arg For Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, op, specs, name):\n self.op = op\n self.specs = specs\n self.name = name", + "docstring": "Creates a object. Args: op: the \"producer\" object that this class wraps; it produces a list of tensors to save. E.g., a \"Variable\" object saving its backing tensor. specs: a list of SaveSpec, each element of which describes one tensor to save under this object. All Tensors must be on the same device. name: the name to save the object under.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:op arg:specs arg:name arguments arg arg arg arg Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "on_graph_execution_trace", + "source_code": "def on_graph_execution_trace(self, graph_execution_trace_index, graph_execution_trace):\n pass", + "docstring": "Monitor method for intra-graph execution events. 
Return values (if any) are ignored by the associated DebugDataReader. Args: graph_execution_trace_index: The index of the intra-graph execution event, as an int. graph_execution_trace: A GraphExecutionTrace data object, for an intra-graph tensor event.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_monitors.py", + "ast_data": "FunctionDef name:on_graph_execution_trace arg:self arg:graph_execution_trace_index arg:graph_execution_trace arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "softmax_cross_entropy_with_logits", + "source_code": "@tf_export(v1=['nn.softmax_cross_entropy_with_logits'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)\ndef softmax_cross_entropy_with_logits(labels=None, logits=None, dim=-1, name=None, axis=None):\n dim = deprecated_argument_lookup('axis', axis, 'dim', dim)\n _ensure_xent_args('softmax_cross_entropy_with_logits', labels, logits)\n with ops.name_scope(name, 'softmax_cross_entropy_with_logits_sg', [logits, labels]) as name:\n labels = array_ops.stop_gradient(labels, name='labels_stop_gradient')\n return softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits, axis=dim, name=name)", + "docstring": "Computes softmax cross entropy between and . Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. If using exclusive (wherein one and only one class is true at a time), see . **WARNING:** This op expects unscaled logits, since it performs a on internally for efficiency. Do not call this op with the output of , as it will produce incorrect results. A common use case is to have logits and labels of shape , but higher dimensions are supported, with the argument specifying the class dimension. Backpropagation will happen only into . To calculate a cross entropy loss that allows backpropagation into both and , see . **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: labels: Each vector along the class dimension should hold a valid probability distribution e.g. for the case in which labels are of shape , each row of must be a valid probability distribution. logits: Per-label activations, typically a linear output. These activation energies are interpreted as unnormalized log probabilities. dim: The class dimension. Defaulted to -1 which is the last dimension. name: A name for the operation (optional). axis: Alias for dim. Returns: A that contains the softmax cross entropy loss. 
Its type is the same as and its shape is the same as except that it does not have the last dimension of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:softmax_cross_entropy_with_logits arg:labels arg:logits arg:dim arg:name arg:axis arguments arg arg arg arg arg Assign Call Call With Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "flow", + "source_code": "@property\ndef flow(self):\n return self._flow", + "docstring": "For compatibility; flows are not meaningful when eager is enabled.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:flow arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_saved_model", + "source_code": "@classmethod\n@_deprecation.deprecated(None, 'Use `lite.TFLiteConverter.from_saved_model` instead.')\ndef from_saved_model(cls, saved_model_dir, input_arrays=None, input_shapes=None, output_arrays=None, tag_set=None, signature_key=None):\n return TFLiteConverter.from_saved_model(saved_model_dir, input_arrays, input_shapes, output_arrays, tag_set, signature_key)", + "docstring": "Creates a TocoConverter class from a SavedModel.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:from_saved_model arg:cls arg:saved_model_dir arg:input_arrays arg:input_shapes arg:output_arrays arg:tag_set arg:signature_key arguments arg arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "is_in_onnx_export", + "source_code": "def is_in_onnx_export() -> bool:\n from torch.onnx._globals import GLOBALS\n from torch.onnx._internal.exporter import _flags\n return GLOBALS.in_onnx_export or _flags._is_onnx_exporting", + "docstring": "Returns whether it is in the middle of ONNX export.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\__init__.py", + "ast_data": "FunctionDef name:is_in_onnx_export arguments Return return:yes BoolOp" + }, + { + "library": "seaborn", + "name": "mpl_palette", + "source_code": "def mpl_palette(name, n_colors=6, as_cmap=False):\n if name.endswith('_d'):\n sub_name = name[:-2]\n if sub_name.endswith('_r'):\n reverse = True\n sub_name = sub_name[:-2]\n else:\n reverse = False\n pal = color_palette(sub_name, 2) + ['#333333']\n if reverse:\n pal = pal[::-1]\n cmap = blend_palette(pal, n_colors, as_cmap=True)\n else:\n cmap = get_colormap(name)\n if name in MPL_QUAL_PALS:\n bins = np.linspace(0, 1, MPL_QUAL_PALS[name])[:n_colors]\n else:\n bins = np.linspace(0, 1, int(n_colors) + 2)[1:-1]\n palette = list(map(tuple, cmap(bins)[:, :3]))\n if as_cmap:\n return cmap\n else:\n return _ColorPalette(palette)", + "docstring": "Return a palette or colormap from the matplotlib registry. For continuous palettes, evenly-spaced discrete samples are chosen while excluding the minimum and maximum value in the colormap to provide better contrast at the extremes. For qualitative palettes (e.g. those from colorbrewer), exact values are indexed (rather than interpolated), but fewer than can be returned if the palette does not define that many. Parameters ---------- name : string Name of the palette. This should be a named matplotlib colormap. n_colors : int Number of discrete colors in the palette. Returns ------- list of RGB tuples or :class: Examples -------- .. 
include:: ../docstrings/mpl_palette.rst", + "type": "function", + "file_path": "seaborn\\seaborn\\palettes.py", + "ast_data": "FunctionDef name:mpl_palette arg:name arg:n_colors arg:as_cmap arguments arg arg arg If Call Assign If Call Assign Assign Assign Assign Call If Assign Assign Call Assign Call If Compare Assign Call Assign Call Call Assign Call Call Call If Return return:yes Return return:yes Call" + }, + { + "library": "scipy", + "name": "_nan_allsame", + "source_code": "def _nan_allsame(a, axis, keepdims=False):\n if axis is None:\n if a.size == 0:\n return True\n a = a.ravel()\n axis = 0\n else:\n shp = a.shape\n if shp[axis] == 0:\n shp = shp[:axis] + (1,) * keepdims + shp[axis + 1:]\n return np.full(shp, fill_value=True, dtype=bool)\n a0 = _first_nonnan(a, axis=axis)\n return ((a0 == a) | np.isnan(a)).all(axis=axis, keepdims=keepdims)", + "docstring": "Determine if the values along an axis are all the same. nan values are ignored. must be a numpy array. is assumed to be normalized; that is, 0 >> from numpy import nan, array >>> a = array([[ 3., 3., nan, 3.], ... [ 1., nan, 2., 4.], ... [nan, nan, 9., -1.], ... [nan, 5., 4., 3.], ... [ 2., 2., 2., 2.], ... [nan, nan, nan, nan]]) >>> _nan_allsame(a, axis=1, keepdims=True) array([[ True], [False], [False], [False], [ True], [ True]])", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_util.py", + "ast_data": "FunctionDef name:_nan_allsame arg:a arg:axis arg:keepdims arguments arg arg arg If Compare If Compare Return return:yes Assign Call Assign Assign If Compare Assign Return return:yes Call Assign Call Return return:yes Call Compare Call" + }, + { + "library": "cryptography", + "name": "subject_name", + "source_code": "def subject_name(self, name: Name) -> CertificateSigningRequestBuilder:\n if not isinstance(name, Name):\n raise TypeError('Expecting x509.Name object.')\n if self._subject_name is not None:\n raise ValueError('The subject name may only be set once.')\n return CertificateSigningRequestBuilder(name, self._extensions, self._attributes)", + "docstring": "Sets the certificate requestor's distinguished name.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\x509\\base.py", + "ast_data": "FunctionDef name:subject_name arg:self arg:name arguments arg arg If Call Raise Call If Compare Raise Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "sections", + "source_code": "def sections(self):\n return list(self._sections.keys())", + "docstring": "Return the section headers of the config file. Parameters ---------- None Returns ------- keys : list of str The list of section headers.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\npy_pkg_config.py", + "ast_data": "FunctionDef name:sections arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_filter_backed_symints", + "source_code": "@classmethod\ndef _filter_backed_symints(cls: type[GuardedCache[T]], inputs: Sequence[InputType]) -> list[torch.SymInt]:\n return [s for s in inputs if isinstance(s, torch.SymInt) and has_hint(s)]", + "docstring": "Get the backed SymInt objects from the input list. 
Note that we can never have guards that depend on unbacked symint.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codecache.py", + "ast_data": "FunctionDef name:_filter_backed_symints arg:cls arg:inputs arguments arg arg Return return:yes BoolOp Call Call" + }, + { + "library": "tensorflow", + "name": "_convert", + "source_code": "def _convert(self, value, dtype):\n if isinstance(value, resource_variable_ops.ResourceVariable):\n raise RuntimeError(f'Attempting to return a variable from an eagerly executed py_func. Only numeric data structures like Tensors or NumPy arrays should be returned; to return the value of a variable, make sure to obtain the Tensor backing it by calling `.read_value()` on the variable in question: {value}')\n if value is None and self._is_grad_func:\n return constant_op.constant(0.0, dtype=dtype)\n return ops.convert_to_tensor(value, dtype=dtype)", + "docstring": "Converts to a tensor of type , with error checking. Args: value: The tensor to convert. dtype: The desired dtype. Returns: A tensor of type , or a zeros tensor if value is None and this function is in fact a gradient function. Raises: RuntimeError: if is a variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py", + "ast_data": "FunctionDef name:_convert arg:self arg:value arg:dtype arguments arg arg arg If Call Raise Call If BoolOp Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_root_scalar_toms748_doc", + "source_code": "def _root_scalar_toms748_doc():\n pass", + "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_root_scalar.py", + "ast_data": "FunctionDef name:_root_scalar_toms748_doc arguments" + }, + { + "library": "tensorflow", + "name": "_add_argument_transformer", + "source_code": "def _add_argument_transformer(parent, node, full_name, name, logs, arg_name, arg_value_ast):\n node.keywords.append(ast.keyword(arg=arg_name, value=arg_value_ast))\n logs.append((ast_edits.INFO, node.lineno, node.col_offset, \"Adding argument '%s' to call to %s.\" % (pasta.dump(node.keywords[-1]), full_name or name)))\n return node", + "docstring": "Adds an argument (as a final kwarg arg_name=arg_value_ast).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py", + "ast_data": "FunctionDef name:_add_argument_transformer arg:parent arg:node arg:full_name arg:name arg:logs arg:arg_name arg:arg_value_ast arguments arg arg arg arg arg arg arg Call Call Call Call BoolOp Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n Y = self.decision_function(X)\n if self.n_classes_ == 2:\n thresh = _threshold_for_binary_predict(self.estimators_[0])\n return self.classes_[(Y > thresh).astype(int)]\n return self.classes_[Y.argmax(axis=1)]", + "docstring": "Estimate the best class label for each sample in X. 
This is implemented as `` which will return the label of the class with most votes by estimators predicting the outcome of a decision for each possible class pair. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : numpy array of shape [n_samples] Predicted multi-class targets.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multiclass.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call If Compare Assign Call Return return:yes Call Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_validate_flat_values_dynamically", + "source_code": "def _validate_flat_values_dynamically(self, flat_values):\n if self.row_partitions:\n assert_op = check_ops.assert_equal(self.row_partitions[-1].nvals(), array_ops.shape(flat_values, out_type=self.dtype)[0], message='Last row partition does not match flat_values.')\n return control_flow_ops.with_dependencies([assert_op], flat_values)\n return flat_values", + "docstring": "Test if flat_values have the right nvals dynamically.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:_validate_flat_values_dynamically arg:self arg:flat_values arguments arg arg If Assign Call Call Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_emit_with_loc", + "source_code": "def _emit_with_loc(self, op_str, node=None):\n loc = ''\n if node:\n loc = self._create_mlir_loc(anno.getanno(node, anno.Basic.ORIGIN, default=None))\n self.emit(op_str + ' ' + loc)", + "docstring": "Emit the mlir operation with the location associated with the node. Args: op_str: The mlir operation string to be emitted. node: The node of the AST tree, the mlir operation translated from.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py", + "ast_data": "FunctionDef name:_emit_with_loc arg:self arg:op_str arg:node arguments arg arg arg Assign If Assign Call Call Call" + }, + { + "library": "pandas", + "name": "flatten", + "source_code": "def flatten(line):\n for element in line:\n if iterable_not_string(element):\n yield from flatten(element)\n else:\n yield element", + "docstring": "Flatten an arbitrarily nested sequence. Parameters ---------- line : sequence The non string sequence to flatten Notes ----- This doesn't consider strings sequences. Returns ------- flattened : generator", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:flatten arg:line arguments arg For If Call Call" + }, + { + "library": "scikit-learn", + "name": "__setstate__", + "source_code": "def __setstate__(self, state):\n super().__setstate__(state)\n if hasattr(self, 'X_thresholds_') and hasattr(self, 'y_thresholds_'):\n self._build_f(self.X_thresholds_, self.y_thresholds_)", + "docstring": "Pickle-protocol - set state of the estimator. 
We need to rebuild the interpolation function.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\isotonic.py", + "ast_data": "FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Call Call If BoolOp Call Call Call" + }, + { + "library": "scipy", + "name": "CrossInTray", + "source_code": "class CrossInTray(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.global_optimum = [(1.34940668535334, 1.349406608602084), (-1.34940668535334, 1.349406608602084), (1.34940668535334, -1.349406608602084), (-1.34940668535334, -1.349406608602084)]\n self.fglob = -2.062611870822739\n\n def fun(self, x, *args):\n self.nfev += 1\n return -0.0001 * (abs(sin(x[0]) * sin(x[1]) * exp(abs(100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi))) + 1) ** 0.1", + "docstring": "Cross-in-Tray objective function. This class defines the Cross-in-Tray [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{CrossInTray}}(x) = - 0.0001 \\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1} with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py", + "ast_data": "ClassDef name:CrossInTray FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, f: _T) -> _T:\n _stats_registry.register(f, self._op_type + ',' + self._statistic_type)\n return f", + "docstring": "Registers \"f\" as the statistics function for \"op_type\".", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:f arguments arg arg Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_trackable_children", + "source_code": "def _trackable_children(self, save_type=trackable.SaveType.CHECKPOINT, **kwargs):\n if context.executing_eagerly():\n graph_key = None\n else:\n graph = ops.get_default_graph()\n graph_key = graph._graph_key\n weights = {}\n for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):\n if g == graph_key:\n weights[name] = v\n weights.update(super(LossScale, self)._trackable_children(save_type, **kwargs))\n return weights", + "docstring": "From Trackable. 
Gather graph-specific weights to save.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py", + "ast_data": "FunctionDef name:_trackable_children arg:self arg:save_type arguments arg arg arg If Call Assign Assign Call Assign Assign For Call Call arguments arg If Compare Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "legend", + "source_code": "@_docstring.interpd\ndef legend(self, *args, **kwargs):\n handles, labels, kwargs = mlegend._parse_legend_args([self], *args, **kwargs)\n self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)\n self.legend_._remove_method = self._remove_legend\n return self.legend_", + "docstring": "Place a legend on the Axes. Call signatures:: legend() legend(handles, labels) legend(handles=handles) legend(labels) The call signatures correspond to the following different ways to use this method: **1. Automatic detection of elements to be shown in the legend** The elements to be added to the legend are automatically determined, when you do not pass in any extra arguments. In this case, the labels are taken from the artist. You can specify them either at artist creation or by calling the :meth: method on the artist:: ax.plot([1, 2, 3], label='Inline label') ax.legend() or:: line, = ax.plot([1, 2, 3]) line.set_label('Label via method') ax.legend() .. note:: Specific artists can be excluded from the automatic legend element selection by using a label starting with an underscore, \"_\". A string starting with an underscore is the default label for all artists, so calling without any arguments and without setting the labels manually will result in a `.Artist.Artist~matplotlib.legend.Legendlegend_guide` for details. Examples -------- .. plot:: gallery/text_labels_and_annotations/legend.py", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:legend arg:self arguments arg arg arg Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "django", + "name": "_check_pattern_startswith_slash", + "source_code": "def _check_pattern_startswith_slash(self):\n if not settings.APPEND_SLASH:\n return []\n if self._regex.startswith(('/', '^/', '^\\\\/')) and (not self._regex.endswith('/')):\n warning = Warning(\"Your URL pattern {} has a route beginning with a '/'. Remove this slash as it is unnecessary. If this pattern is targeted in an include(), ensure the include() pattern has a trailing '/'.\".format(self.describe()), id='urls.W002')\n return [warning]\n else:\n return []", + "docstring": "Check that the pattern does not begin with a forward slash.", + "type": "method", + "file_path": "django\\django\\urls\\resolvers.py", + "ast_data": "FunctionDef name:_check_pattern_startswith_slash arg:self arguments arg If Return return:no If BoolOp Call Call Assign Call Call Call Return return:yes Return return:no" + }, + { + "library": "pandas", + "name": "PerformanceWarning", + "source_code": "class PerformanceWarning(Warning):\n pass", + "docstring": "Warning raised when there is a possible performance impact. See Also -------- DataFrame.set_index : Set the DataFrame index using existing columns. DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Examples -------- >>> df = pd.DataFrame( ... {\"jim\": [0, 0, 1, 1], \"joe\": [\"x\", \"x\", \"z\", \"y\"], \"jolie\": [1, 2, 3, 4]} ... 
) >>> df = df.set_index([\"jim\", \"joe\"]) >>> df jolie jim joe 0 x 1 x 2 1 z 3 y 4 >>> df.loc[(1, \"z\")] # doctest: +SKIP # PerformanceWarning: indexing past lexsort depth may impact performance. df.loc[(1, 'z')] jolie jim joe 1 z 3", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:PerformanceWarning" + }, + { + "library": "tensorflow", + "name": "_num_tasks", + "source_code": "def _num_tasks(self) -> int:\n return self._server.num_tasks()", + "docstring": "Returns the number of tasks currently being executed on the worker.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py", + "ast_data": "FunctionDef name:_num_tasks arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "round", + "source_code": "@final\ndef round(self, decimals: int) -> Self:\n if not self.is_numeric or self.is_bool:\n return self.copy(deep=False)\n values = self.values.round(decimals)\n refs = None\n if values is self.values:\n refs = self.refs\n return self.make_block_same_class(values, refs=refs)", + "docstring": "Rounds the values. If the block is not of an integer or float dtype, nothing happens. This is consistent with DataFrame.round behavior. (Note: Series.round would raise) Parameters ---------- decimals: int, Number of decimal places to round to. Caller is responsible for validating this", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:round arg:self arg:decimals arguments arg arg If BoolOp Return return:yes Call Assign Call Assign If Compare Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_outer_context_and_inner_device_stack", + "source_code": "def _get_outer_context_and_inner_device_stack() -> tuple[Callable[[], ContextManager[Graph]], traceable_stack.TraceableStack]:\n default_graph = get_default_graph()\n outer_context = None\n innermost_nonempty_device_stack = default_graph._device_function_stack\n if not _default_graph_stack.stack:\n if default_graph.building_function:\n raise RuntimeError('The global graph is building a function.')\n outer_context = default_graph.as_default\n else:\n for stack_entry in reversed(context.context().context_switches.stack):\n if not innermost_nonempty_device_stack:\n innermost_nonempty_device_stack = stack_entry.device_stack\n if not stack_entry.is_building_function:\n outer_context = stack_entry.enter_context_fn\n break\n if outer_context is None:\n outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default\n if outer_context is None:\n raise RuntimeError('All graphs are building functions, and no eager context was previously active.')\n return (outer_context, innermost_nonempty_device_stack)", + "docstring": "Get the outermost context not building a function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_get_outer_context_and_inner_device_stack arguments Assign Call Assign Assign If If Raise Call Assign For Call Call If Assign If Assign If Compare Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "django", + "name": "BaseFinder", + "source_code": "class BaseFinder:\n\n def check(self, **kwargs):\n raise NotImplementedError('subclasses may provide a check() method to verify the finder is configured correctly.')\n\n def _check_deprecated_find_param(self, **kwargs):\n return 
_check_deprecated_find_param(class_name=self.__class__.__qualname__, stacklevel=4, **kwargs)\n\n def find(self, path, find_all=False, **kwargs):\n raise NotImplementedError('subclasses of BaseFinder must provide a find() method')\n\n def list(self, ignore_patterns):\n raise NotImplementedError('subclasses of BaseFinder must provide a list() method')", + "docstring": "A base file finder to be used for custom staticfiles finder classes.", + "type": "class", + "file_path": "django\\django\\contrib\\staticfiles\\finders.py", + "ast_data": "ClassDef name:BaseFinder FunctionDef name:check arg:self arguments arg arg Raise Call FunctionDef name:_check_deprecated_find_param arg:self arguments arg arg Return return:yes Call FunctionDef name:find arg:self arg:path arg:find_all arguments arg arg arg arg Raise Call FunctionDef name:list arg:self arg:ignore_patterns arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "TFRecordDatasetV1", + "source_code": "@tf_export(v1=['data.TFRecordDataset'])\nclass TFRecordDatasetV1(dataset_ops.DatasetV1Adapter):\n\n def __init__(self, filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, name=None):\n wrapped = TFRecordDatasetV2(filenames, compression_type, buffer_size, num_parallel_reads, name=name)\n super(TFRecordDatasetV1, self).__init__(wrapped)\n __init__.__doc__ = TFRecordDatasetV2.__init__.__doc__\n\n @property\n def _filenames(self):\n return self._dataset._filenames\n\n @_filenames.setter\n def _filenames(self, value):\n self._dataset._filenames = value", + "docstring": "A comprising records from one or more TFRecord files.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py", + "ast_data": "ClassDef name:TFRecordDatasetV1 FunctionDef name:__init__ arg:self arg:filenames arg:compression_type arg:buffer_size arg:num_parallel_reads arg:name arguments arg arg arg arg arg arg Assign Call Call Call Assign FunctionDef name:_filenames arg:self arguments arg Return return:yes FunctionDef name:_filenames arg:self arg:value arguments arg arg Assign Call" + }, + { + "library": "scipy", + "name": "poles", + "source_code": "@property\ndef poles(self):\n return self._poles", + "docstring": "Poles of the system.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:poles arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "set_stance", + "source_code": "def set_stance(stance: str='default', *, skip_guard_eval_unsafe=False, force_backend=None):\n import torch._dynamo\n return torch._dynamo.set_stance(stance, skip_guard_eval_unsafe=skip_guard_eval_unsafe, force_backend=force_backend)", + "docstring": "Set the current stance of the compiler. Can be used as a function, context manager, or decorator. Do not use this function inside a region - an error will be raised otherwise. .. code-block:: python @torch.compile def foo(x): ... @torch.compiler.set_stance(\"force_eager\") def bar(): # will not be compiled foo(...) bar() with torch.compiler.set_stance(\"force_eager\"): # will also not be compiled foo(...) torch.compiler.set_stance(\"force_eager\") # will also not be compiled foo(...) torch.compiler.set_stance(\"default\") # will be compiled foo(...) Args: stance: The stance to set the compiler to. Valid values are: - \"default\": The default stance, used for normal compilation. - \"force_eager\": Ignore all directives. - \"eager_on_recompile\": Run code eagerly when a recompile is necessary. 
If there is cached compiled code valid for the input, it will still be used. - \"fail_on_recompile\": Raise an error when recompiling a function. skip_guard_eval_unsafe: A flag to run only differentiating guards. CAUTION - This flag is unsafe and should only be used if your setup meets the following conditions. torch.compile uses a guard system to support recompilations and choose which compiled artifact to run at runtime. These guards, though efficient, add some overhead, which may impact performance in scenarios where you need to optimize for minimal guard processing time. This API enables you to disable guard evaluation, assuming that you have warmed up the compiled model with a sufficient variety of inputs. This assumption means that, after the warmup phase, no further recompilations will be necessary. If this assumption fails, there is a risk of silently producing incorrect results (hence the term \"unsafe\" in the API name). force_backend: If is \"default\", this argument can be used to force to use a specific backend. Otherwise, an error is raised.", + "type": "function", + "file_path": "pytorch\\torch\\compiler\\__init__.py", + "ast_data": "FunctionDef name:set_stance arg:stance arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "padded", + "source_code": "def padded(self, w_pad, h_pad=None):\n points = self.get_points()\n if h_pad is None:\n h_pad = w_pad\n return Bbox(points + [[-w_pad, -h_pad], [w_pad, h_pad]])", + "docstring": "Construct a by padding this one on all four sides. Parameters ---------- w_pad : float Width pad h_pad : float, optional Height pad. Defaults to *w_pad*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:padded arg:self arg:w_pad arg:h_pad arguments arg arg arg Assign Call If Compare Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_3d_extend_contour", + "source_code": "def _3d_extend_contour(self, cset, stride=5):\n dz = (cset.levels[1] - cset.levels[0]) / 2\n polyverts = []\n colors = []\n for idx, level in enumerate(cset.levels):\n path = cset.get_paths()[idx]\n subpaths = [*path._iter_connected_components()]\n color = cset.get_edgecolor()[idx]\n top = art3d._paths_to_3d_segments(subpaths, level - dz)\n bot = art3d._paths_to_3d_segments(subpaths, level + dz)\n if not len(top[0]):\n continue\n nsteps = max(round(len(top[0]) / stride), 2)\n stepsize = (len(top[0]) - 1) / (nsteps - 1)\n polyverts.extend([(top[0][round(i * stepsize)], top[0][round((i + 1) * stepsize)], bot[0][round((i + 1) * stepsize)], bot[0][round(i * stepsize)]) for i in range(round(nsteps) - 1)])\n colors.extend([color] * (round(nsteps) - 1))\n self.add_collection3d(art3d.Poly3DCollection(np.array(polyverts), facecolors=colors, edgecolors=colors, shade=True))\n cset.remove()", + "docstring": "Extend a contour in 3D by creating", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:_3d_extend_contour arg:self arg:cset arg:stride arguments arg arg arg Assign Assign Assign For Call Assign Call Assign Call Assign Call Assign Call Assign Call If Call Assign Call Call Call Assign Call Call Call Call Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "replace_method_name", + "source_code": "def replace_method_name(self, signature_key, method_name, tags=None):\n if not signature_key:\n raise ValueError('`signature_key` must be defined.')\n if not 
method_name:\n raise ValueError('`method_name` must be defined.')\n if tags is not None and (not isinstance(tags, list)):\n tags = [tags]\n found_match = False\n for meta_graph_def in self._saved_model.meta_graphs:\n if tags is None or set(tags) == set(meta_graph_def.meta_info_def.tags):\n if signature_key not in meta_graph_def.signature_def:\n raise ValueError(f\"MetaGraphDef associated with tags {tags} does not have a signature_def with key: '{signature_key}'. This means either you specified the wrong signature key or forgot to put the signature_def with the corresponding key in your SavedModel.\")\n meta_graph_def.signature_def[signature_key].method_name = method_name\n found_match = True\n if not found_match:\n raise ValueError(f'MetaGraphDef associated with tags {tags} could not be found in SavedModel. This means either you specified invalid tags or your SavedModel does not have a MetaGraphDef with the specified tags.')", + "docstring": "Replaces the method_name in the specified signature_def. This will match and replace multiple sig defs iff tags is None (i.e when multiple s have a signature_def with the same key). If tags is not None, this will only replace a single signature_def in the with matching tags. Args: signature_key: Key of the signature_def to be updated. method_name: new method_name to replace the existing one. tags: A tag or sequence of tags identifying the to update. If None, all meta graphs will be updated. Raises: ValueError: if signature_key or method_name are not defined or if no metagraphs were found with the associated tags or if no meta graph has a signature_def that matches signature_key.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\method_name_updater.py", + "ast_data": "FunctionDef name:replace_method_name arg:self arg:signature_key arg:method_name arg:tags arguments arg arg arg arg If Raise Call If Raise Call If BoolOp Compare Call Assign Assign For If BoolOp Compare Compare Call Call If Compare Raise Call Assign Assign If Raise Call" + }, + { + "library": "pytorch", + "name": "_check_shard_metadata_pair_overlap", + "source_code": "def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):\n ndims = len(shard1.shard_offsets)\n for i in range(ndims):\n if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_sizes[i]:\n return False\n if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_sizes[i]:\n return False\n return True", + "docstring": "Checks if two shards overlap.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py", + "ast_data": "FunctionDef name:_check_shard_metadata_pair_overlap arg:shard1 arg:shard2 arguments arg arg Assign Call For Call If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "scrapy", + "name": "re_rsearch", + "source_code": "def re_rsearch(pattern: str | Pattern[str], text: str, chunk_size: int=1024) -> tuple[int, int] | None:\n\n def _chunk_iter() -> Iterable[tuple[str, int]]:\n offset = len(text)\n while True:\n offset -= chunk_size * 1024\n if offset <= 0:\n break\n yield (text[offset:], offset)\n yield (text, 0)\n if isinstance(pattern, str):\n pattern = re.compile(pattern)\n for chunk, offset in _chunk_iter():\n matches = list(pattern.finditer(chunk))\n if matches:\n start, end = matches[-1].span()\n return (offset + start, offset + end)\n return None", + "docstring": "This function does a reverse search in a text using a regular 
expression given in the attribute 'pattern'. Since the re module does not provide this functionality, we have to find for the expression into chunks of text extracted from the end (for the sake of efficiency). At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for the pattern. If the pattern is not found, another chunk is extracted, and another search is performed. This process continues until a match is found, or until the whole file is read. In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing the start position of the match, and the ending (regarding the entire text).", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\python.py", + "ast_data": "FunctionDef name:re_rsearch arg:pattern arg:text arg:chunk_size arguments arg arg arg FunctionDef name:_chunk_iter arguments Assign Call While If Compare If Call Assign Call For Call Assign Call Call If Assign Call Return return:yes Return return:no" + }, + { + "library": "numpy", + "name": "poly2lag", + "source_code": "def poly2lag(pol):\n [pol] = pu.as_series([pol])\n res = 0\n for p in pol[::-1]:\n res = lagadd(lagmulx(res), p)\n return res", + "docstring": "poly2lag(pol) Convert a polynomial to a Laguerre series. Convert an array representing the coefficients of a polynomial (relative to the \"standard\" basis) ordered from lowest degree to highest, to an array of the coefficients of the equivalent Laguerre series, ordered from lowest to highest degree. Parameters ---------- pol : array_like 1-D array containing the polynomial coefficients Returns ------- c : ndarray 1-D array containing the coefficients of the equivalent Laguerre series. See Also -------- lag2poly Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> import numpy as np >>> from numpy.polynomial.laguerre import poly2lag >>> poly2lag(np.arange(4)) array([ 23., -63., 58., -18.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\laguerre.py", + "ast_data": "FunctionDef name:poly2lag arg:pol arguments arg Assign Call Assign For Assign Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "mask_rows", + "source_code": "def mask_rows(a, axis=np._NoValue):\n if axis is not np._NoValue:\n warnings.warn('The axis argument has always been ignored, in future passing it will raise TypeError', DeprecationWarning, stacklevel=2)\n return mask_rowcols(a, 0)", + "docstring": "Mask rows of a 2D array that contain masked values. This function is a shortcut to `axis` equal to 0. See Also -------- mask_rowcols : Mask rows and/or columns of a 2D array. masked_where : Mask where a condition is met. 
Examples -------- >>> import numpy as np >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) >>> a = np.ma.masked_equal(a, 1) >>> a masked_array( data=[[0, 0, 0], [0, --, 0], [0, 0, 0]], mask=[[False, False, False], [False, True, False], [False, False, False]], fill_value=1) >>> np.ma.mask_rows(a) masked_array( data=[[0, 0, 0], [--, --, --], [0, 0, 0]], mask=[[False, False, False], [ True, True, True], [False, False, False]], fill_value=1)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:mask_rows arg:a arg:axis arguments arg arg If Compare Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "inplace_identity", + "source_code": "def inplace_identity(X):\n pass", + "docstring": "Simply leave the input array unchanged. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Data, where is the number of samples and is the number of features.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py", + "ast_data": "FunctionDef name:inplace_identity arg:X arguments arg" + }, + { + "library": "tensorflow", + "name": "event_size", + "source_code": "@property\ndef event_size(self):\n return self._event_size", + "docstring": "Scalar tensor: the number of classes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\categorical.py", + "ast_data": "FunctionDef name:event_size arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_min_max_value_by_expanding_range", + "source_code": "def _get_min_max_value_by_expanding_range(self, start_idx: int) -> tuple[float, float]:\n mse_min = (float('inf'), float('inf'), float('inf'))\n left, right = (start_idx, start_idx)\n move_left = True\n while not (left == 0 and right == self._num_bins - 1):\n if move_left and left > 0 or right == self._num_bins - 1:\n left = max(left - 1, 0)\n else:\n right = min(right + 1, self._num_bins - 1)\n move_left = not move_left\n quant_min, quant_max = (self._hist_mids[left], self._hist_mids[right])\n mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max)\n mse_min = min(mse_tuple, mse_min)\n min_value, max_value = (mse_min[1], mse_min[2])\n return (min_value, max_value)", + "docstring": "Starting from start_idx, expand left and right alternately to find the min value of mse loss. Args: start_idx: Index to start quantization. Returns: (min_value, max_value): Min and max calculated.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py", + "ast_data": "FunctionDef name:_get_min_max_value_by_expanding_range arg:self arg:start_idx arguments arg arg Assign Call Call Call Assign Assign While BoolOp Compare Compare If BoolOp BoolOp Compare Compare Assign Call Assign Call Assign Assign Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get_nontrivial_guards", + "source_code": "def get_nontrivial_guards(self) -> list[SympyBoolean]:\n return [self.simplify(guard.expr) for guard in self.guards if self._maybe_evaluate_static(guard.expr, axioms=(), size_oblivious=guard.size_oblivious) is None]", + "docstring": "Returns a list of guard expressions that aren't statically known (i.e. 
not trivial)", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:get_nontrivial_guards arg:self arguments arg Return return:yes Call Compare Call" + }, + { + "library": "tensorflow", + "name": "one_hot", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef one_hot(indices, num_classes):\n return array_ops.one_hot(indices, depth=num_classes, axis=-1)", + "docstring": "Computes the one-hot representation of an integer tensor. Args: indices: nD integer tensor of shape num_classes: Integer, number of classes to consider. Returns: (n + 1)D one hot representation of the input with shape Returns: The one-hot tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:one_hot arg:indices arg:num_classes arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "getmaxprint", + "source_code": "def getmaxprint(self):\n return self._getmaxprint()", + "docstring": "Maximum number of elements to display when printed.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_matrix.py", + "ast_data": "FunctionDef name:getmaxprint arg:self arguments arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "get_toc", + "source_code": "def get_toc(self) -> None:\n doctree = self.env.get_and_resolve_doctree(self.config.master_doc, self, prune_toctrees=False, includehidden=True)\n self.refnodes = self.get_refnodes(doctree, [])\n master_dir = Path(self.config.master_doc).parent\n for item in self.refnodes:\n item['refuri'] = str(master_dir / item['refuri'])\n self.toc_add_files(self.refnodes)", + "docstring": "Get the total table of contents, containing the master_doc and pre and post files not managed by Sphinx.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", + "ast_data": "FunctionDef name:get_toc arg:self arguments arg Assign Call Assign Call Assign Call For Assign Call Call" + }, + { + "library": "pytorch", + "name": "_get_modules", + "source_code": "def _get_modules(self) -> set[nn.Module]:\n return {pi.module for pi in self.flat_param._param_infos}.union({spi.module for spi in self.flat_param._shared_param_infos})", + "docstring": "Return a :class: of the modules whose parameters are included in this handle's flat parameter.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:_get_modules arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "is_datetime64tz_dtype", + "source_code": "def is_datetime64tz_dtype(arr_or_dtype) -> bool:\n warnings.warn('is_datetime64tz_dtype is deprecated and will be removed in a future version. Check `isinstance(dtype, pd.DatetimeTZDtype)` instead.', DeprecationWarning, stacklevel=2)\n if isinstance(arr_or_dtype, DatetimeTZDtype):\n return True\n if arr_or_dtype is None:\n return False\n return DatetimeTZDtype.is_dtype(arr_or_dtype)", + "docstring": "Check whether an array-like or dtype is of a DatetimeTZDtype dtype. .. deprecated:: 2.1.0 Use isinstance(dtype, pd.DatetimeTZDtype) instead. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of a DatetimeTZDtype dtype. See Also -------- api.types.is_datetime64_dtype: Check whether an array-like or dtype is of the datetime64 dtype. 
api.types.is_datetime64_any_dtype: Check whether the provided array or dtype is of the datetime64 dtype. Examples -------- >>> from pandas.api.types import is_datetime64tz_dtype >>> is_datetime64tz_dtype(object) False >>> is_datetime64tz_dtype([1, 2, 3]) False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\")) True >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\") >>> s = pd.Series([], dtype=dtype) >>> is_datetime64tz_dtype(dtype) True >>> is_datetime64tz_dtype(s) True", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\common.py", + "ast_data": "FunctionDef name:is_datetime64tz_dtype arg:arr_or_dtype arguments arg Call If Call Return return:yes If Compare Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_data", + "source_code": "def set_data(self, A):\n if isinstance(A, PIL.Image.Image):\n A = pil_to_array(A)\n self._A = self._normalize_image_array(A)\n self._imcache = None\n self.stale = True", + "docstring": "Set the image array. Note that this function does *not* update the normalization used. Parameters ---------- A : array-like or", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:set_data arg:self arg:A arguments arg arg If Call Assign Call Assign Call Assign Assign" + }, + { + "library": "scipy", + "name": "bisplev", + "source_code": "def bisplev(x, y, tck, dx=0, dy=0):\n tx, ty, c, kx, ky = tck\n if not 0 <= dx < kx:\n raise ValueError(f'0 <= dx = {dx} < kx = {kx} must hold')\n if not 0 <= dy < ky:\n raise ValueError(f'0 <= dy = {dy} < ky = {ky} must hold')\n x, y = map(atleast_1d, [x, y])\n if len(x.shape) != 1 or len(y.shape) != 1:\n raise ValueError('First two entries should be rank-1 arrays.')\n msg = 'Too many data points to interpolate.'\n _int_overflow(x.size * y.size, MemoryError, msg=msg)\n if dx != 0 or dy != 0:\n _int_overflow((tx.size - kx - 1) * (ty.size - ky - 1), MemoryError, msg=msg)\n z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)\n else:\n z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)\n if ier == 10:\n raise ValueError('Invalid input data')\n if ier:\n raise TypeError('An error occurred')\n z.shape = (len(x), len(y))\n if len(z) > 1:\n return z\n if len(z[0]) > 1:\n return z[0]\n return z[0][0]", + "docstring": "Evaluate a bivariate B-spline and its derivatives. Return a rank-2 array of spline function values (or spline derivative values) at points given by the cross-product of the rank-1 arrays and . In special cases, return an array or just a float if either or or both are floats. Based on BISPEV and PARDER from FITPACK. Parameters ---------- x, y : ndarray Rank-1 arrays specifying the domain over which to evaluate the spline or its derivative. tck : tuple A sequence of length 5 returned by containing the knot locations, the coefficients, and the degree of the spline: [tx, ty, c, kx, ky]. dx, dy : int, optional The orders of the partial derivatives in and respectively. Returns ------- vals : ndarray The B-spline or its derivative evaluated over the set formed by the cross-product of and . See Also -------- splprep, splrep, splint, sproot, splev UnivariateSpline, BivariateSpline Notes ----- See to generate the representation. References ---------- .. [1] Dierckx P. : An algorithm for surface fitting with spline functions Ima J. Numer. Anal. 1 (1981) 267-283. .. 
[2] Dierckx P. : An algorithm for surface fitting with spline functions report tw50, Dept. Computer Science,K.U.Leuven, 1980. .. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on Numerical Analysis, Oxford University Press, 1993. Examples -------- Examples are given :ref:.", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_fitpack_impl.py", + "ast_data": "FunctionDef name:bisplev arg:x arg:y arg:tck arg:dx arg:dy arguments arg arg arg arg arg Assign If Compare Raise Call If Compare Raise Call Assign Call If BoolOp Compare Call Compare Call Raise Call Assign Call If BoolOp Compare Compare Call Assign Call Assign Call If Compare Raise Call If Raise Call Assign Call Call If Compare Call Return return:yes If Compare Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "deferred_exits", + "source_code": "@property\ndef deferred_exits(self):\n return self._deferred_exits", + "docstring": "The list of \"deferred\" exits.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", + "ast_data": "FunctionDef name:deferred_exits arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "greater_equal", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef greater_equal(x, y):\n return math_ops.greater_equal(x, y)", + "docstring": "Element-wise truth value of (x >= y). Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:greater_equal arg:x arg:y arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y, classes=None, sample_weight=None):\n first_call = not hasattr(self, 'classes_')\n X, y = self._check_X_y(X, y, reset=first_call)\n _, n_features = X.shape\n if _check_partial_fit_first_call(self, classes):\n n_classes = len(classes)\n self._init_counters(n_classes, n_features)\n Y = label_binarize(y, classes=self.classes_)\n if Y.shape[1] == 1:\n if len(self.classes_) == 2:\n Y = np.concatenate((1 - Y, Y), axis=1)\n else:\n Y = np.ones_like(Y)\n if X.shape[0] != Y.shape[0]:\n msg = 'X.shape[0]=%d and y.shape[0]=%d are incompatible.'\n raise ValueError(msg % (X.shape[0], y.shape[0]))\n Y = Y.astype(np.float64, copy=False)\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X)\n sample_weight = np.atleast_2d(sample_weight)\n Y *= sample_weight.T\n class_prior = self.class_prior\n self._count(X, Y)\n alpha = self._check_alpha()\n self._update_feature_log_prob(alpha)\n self._update_class_log_prior(class_prior=class_prior)\n return self", + "docstring": "Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance overhead hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. 
y : array-like of shape (n_samples,) Target values. classes : array-like of shape (n_classes,), default=None List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\naive_bayes.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:classes arg:sample_weight arguments arg arg arg arg arg Assign Call Assign Call Assign If Call Assign Call Call Assign Call If Compare If Compare Call Assign Call Assign Call If Compare Assign Raise Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "unparse", + "source_code": "def unparse(node, indentation=None, include_encoding_marker=True):\n del indentation\n if not isinstance(node, (list, tuple)):\n node = (node,)\n codes = []\n if include_encoding_marker:\n codes.append('# coding=utf-8')\n for n in node:\n if isinstance(n, gast.AST):\n ast_n = gast.gast_to_ast(n)\n else:\n ast_n = n\n if astunparse is ast:\n ast.fix_missing_locations(ast_n)\n codes.append(astunparse.unparse(ast_n).strip())\n return '\\n'.join(codes)", + "docstring": "Returns the source code of given AST. Args: node: The code to compile, as an AST object. indentation: Unused, deprecated. The returning code will always be indented at 4 spaces. include_encoding_marker: Bool, whether to include a comment on the first line to explicitly specify UTF-8 encoding. Returns: code: The source code generated from the AST object source_mapping: A mapping between the user and AutoGraph generated code.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\parser.py", + "ast_data": "FunctionDef name:unparse arg:node arg:indentation arg:include_encoding_marker arguments arg arg arg If Call Assign Assign If Call For If Call Assign Call Assign If Compare Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "load_source", + "source_code": "def load_source(source_file_path):\n if os.path.isfile(source_file_path):\n with open(source_file_path, 'rb') as f:\n source_text = f.read().decode('utf-8')\n source_lines = source_text.split('\\n')\n else:\n source_lines = _try_load_par_source(source_file_path)\n if source_lines is None:\n raise IOError('Source path neither exists nor can be loaded as a .par file: %s' % source_file_path)\n line_num_width = int(np.ceil(np.log10(len(source_lines)))) + 3\n return (source_lines, line_num_width)", + "docstring": "Load the content of a Python source code file. This function covers the following case: 1. source_file_path points to an existing Python (.py) file on the file system. 2. source_file_path is a path within a .par file (i.e., a zip-compressed, self-contained Python executable). Args: source_file_path: Path to the Python source file to read. Returns: A length-2 tuple: - Lines of the source file, as a of s. - The width of the string needed to show the line number in the file. This is calculated based on the number of lines in the source file. 
Raises: IOError: if loading is unsuccessful.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_utils.py", + "ast_data": "FunctionDef name:load_source arg:source_file_path arguments arg If Call With Call Assign Call Call Assign Call Assign Call If Compare Raise Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "output_types", + "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_types(iterator)`.')\ndef output_types(self):\n return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self._element_spec)", + "docstring": "Returns the type of each component of an element of this iterator. Returns: A (nested) structure of objects corresponding to each component of an element of this dataset.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", + "ast_data": "FunctionDef name:output_types arg:self arguments arg Return return:yes Call arguments arg Call Call" + }, + { + "library": "django", + "name": "PostgresOperatorLookup", + "source_code": "class PostgresOperatorLookup(Lookup):\n postgres_operator = None\n\n def as_postgresql(self, compiler, connection):\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, rhs_params = self.process_rhs(compiler, connection)\n params = tuple(lhs_params) + tuple(rhs_params)\n return ('%s %s %s' % (lhs, self.postgres_operator, rhs), params)", + "docstring": "Lookup defined by operators on PostgreSQL.", + "type": "class", + "file_path": "django\\django\\db\\models\\lookups.py", + "ast_data": "ClassDef name:PostgresOperatorLookup Assign FunctionDef name:as_postgresql arg:self arg:compiler arg:connection arguments arg arg arg Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "gmean", + "source_code": "@xp_capabilities()\n@_axis_nan_policy_factory(lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True, result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])\ndef gmean(a, axis=0, dtype=None, weights=None):\n xp = array_namespace(a, weights)\n a = xp.asarray(a, dtype=dtype)\n if weights is not None:\n weights = xp.asarray(weights, dtype=dtype)\n with np.errstate(divide='ignore'):\n log_a = xp.log(a)\n return xp.exp(_xp_mean(log_a, axis=axis, weights=weights))", + "docstring": "Compute the weighted geometric mean along the specified axis. The weighted geometric mean of the array :math: associated to weights :math: is: .. math:: \\exp \\left( \\frac{ \\sum_{i=1}^n w_i \\ln a_i }{ \\sum_{i=1}^n w_i } \\right) \\, , and, with equal weights, it gives: .. math:: \\sqrt[n]{ \\prod_{i=1}^n a_i } \\, . Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : int or None, optional Axis along which the geometric mean is computed. Default is 0. If None, compute over the whole array . dtype : dtype, optional Type to which the input arrays are cast before the calculation is performed. weights : array_like, optional The array must be broadcastable to the same shape as . Default is None, which gives each value a weight of 1.0. Returns ------- gmean : ndarray See parameter above. See Also -------- numpy.mean : Arithmetic average numpy.average : Weighted average hmean : Harmonic mean Notes ----- The sample geometric mean is the exponential of the mean of the natural logarithms of the observations. 
Negative observations will produce NaNs in the output because the *natural* logarithm (as opposed to the *complex* logarithm) is defined only for non-negative reals. References ---------- .. [1] \"Weighted Geometric Mean\", *Wikipedia*, .. [2] Grossman, J., Grossman, M., Katz, R., \"Averages: A New Approach\", Archimedes Foundation, 1983 Examples -------- >>> from scipy.stats import gmean >>> gmean([1, 4]) 2.0 >>> gmean([1, 2, 3, 4, 5, 6, 7]) 3.3800151591412964 >>> gmean([1, 4, 7], weights=[3, 1, 3]) 2.80668351922014", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:gmean arg:a arg:axis arg:dtype arg:weights arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call With Call Assign Call Return return:yes Call Call Call Call arguments arg arguments arg arg" + }, + { + "library": "seaborn", + "name": "_inverse", + "source_code": "def _inverse(self, values):\n return np.sqrt(values)", + "docstring": "Invert areal values back to point diameter.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\properties.py", + "ast_data": "FunctionDef name:_inverse arg:self arg:values arguments arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "_setfieldnames", + "source_code": "def _setfieldnames(self, names, titles):\n if names:\n if type(names) in [list, tuple]:\n pass\n elif isinstance(names, str):\n names = names.split(',')\n else:\n raise NameError(f'illegal input names {repr(names)}')\n self._names = [n.strip() for n in names[:self._nfields]]\n else:\n self._names = []\n self._names += ['f%d' % i for i in range(len(self._names), self._nfields)]\n _dup = find_duplicate(self._names)\n if _dup:\n raise ValueError(f'Duplicate field names: {_dup}')\n if titles:\n self._titles = [n.strip() for n in titles[:self._nfields]]\n else:\n self._titles = []\n titles = []\n if self._nfields > len(titles):\n self._titles += [None] * (self._nfields - len(titles))", + "docstring": "convert input field names into a list and assign to the _names attribute", + "type": "method", + "file_path": "numpy\\numpy\\_core\\records.py", + "ast_data": "FunctionDef name:_setfieldnames arg:self arg:names arg:titles arguments arg arg arg If If Compare Call If Call Assign Call Raise Call Call Assign Call Assign Call Call Assign Call If Raise Call If Assign Call Assign Assign If Compare Call Call" + }, + { + "library": "pandas", + "name": "_encode_with_my_categories", + "source_code": "def _encode_with_my_categories(self, other: Categorical) -> Categorical:\n codes = recode_for_categories(other.codes, other.categories, self.categories, copy=False)\n return self._from_backing_data(codes)", + "docstring": "Re-encode another categorical using this Categorical's categories. 
Notes ----- This assumes we have already checked self._categories_match_up_to_permutation(other).", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:_encode_with_my_categories arg:self arg:other arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_pow10m1", + "source_code": "def _pow10m1(x):\n return np.expm1(_POW10_LOG10 * x)", + "docstring": "10 ** x - 1 for x near 0", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_filter_design.py", + "ast_data": "FunctionDef name:_pow10m1 arg:x arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "open_file_cm", + "source_code": "def open_file_cm(path_or_file, mode='r', encoding=None):\n fh, opened = to_filehandle(path_or_file, mode, True, encoding)\n return fh if opened else contextlib.nullcontext(fh)", + "docstring": "Pass through file objects and context-manage path-likes.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:open_file_cm arg:path_or_file arg:mode arg:encoding arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "pool", + "source_code": "def pool(self):\n return super().pool()", + "docstring": "Return an opaque token representing the id of this graph's memory pool. This id can optionally be passed to another graph's ``, which hints the other graph may share the same memory pool.", + "type": "method", + "file_path": "pytorch\\torch\\cuda\\graphs.py", + "ast_data": "FunctionDef name:pool arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "get_table_list", + "source_code": "def get_table_list(self, cursor):\n cursor.execute('\\n SELECT\\n table_name,\\n table_type,\\n table_comment\\n FROM information_schema.tables\\n WHERE table_schema = DATABASE()\\n ')\n return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]), row[2]) for row in cursor.fetchall()]", + "docstring": "Return a list of table and view names in the current database.", + "type": "method", + "file_path": "django\\django\\db\\backends\\mysql\\introspection.py", + "ast_data": "FunctionDef name:get_table_list arg:self arg:cursor arguments arg arg Call Return return:yes Call Call Call" + }, + { + "library": "sphinx", + "name": "desc_sig_keyword_type", + "source_code": "class desc_sig_keyword_type(desc_sig_element, _sig_element=True):\n classes = ['kt']", + "docstring": "Node for a keyword which is a built-in type in a signature.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:desc_sig_keyword_type Assign" + }, + { + "library": "pytorch", + "name": "get_backend_config", + "source_code": "def get_backend_config(group: Optional[ProcessGroup]=None) -> str:\n pg = group or _get_default_group()\n if _rank_not_in_group(pg):\n raise ValueError('Invalid process group specified')\n backend_config = _world.pg_backend_config.get(pg)\n return str(not_none(backend_config))", + "docstring": "Return the backend configuration of the given process group. Args: group (ProcessGroup, optional): The process group to work on. The default is the general main process group. If another specific group is specified, the calling process must be part of :attr:. 
Returns: The backend configuration of the given process group as a lower case string.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:get_backend_config arg:group arguments arg Assign BoolOp Call If Call Raise Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_pad_dense_input", + "source_code": "@classmethod\ndef _pad_dense_input(cls, dense_input: torch.Tensor) -> torch.Tensor:\n assert dense_input.dim() == 2\n m, n = dense_input.shape\n min_rows = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_rows\n min_cols = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_cols\n to_pad_m = -m % min_rows if m < min_rows or m % min_rows else 0\n to_pad_n = -n % min_cols if n < min_cols or n % min_rows else 0\n if to_pad_m or to_pad_n:\n return torch.nn.functional.pad(dense_input, (0, to_pad_n, 0, to_pad_m))\n else:\n return dense_input", + "docstring": "Calculates padding for dense tensor and pads tensor if necessary. If padding is not required, this function returns the original tensor.", + "type": "method", + "file_path": "pytorch\\torch\\sparse\\semi_structured.py", + "ast_data": "FunctionDef name:_pad_dense_input arg:cls arg:dense_input arguments arg arg Compare Call Assign Assign Assign Assign BoolOp Compare Assign BoolOp Compare If BoolOp Return return:yes Call Return return:yes" + }, + { + "library": "scipy", + "name": "check_termination", + "source_code": "def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):\n ftol_satisfied = dF < ftol * F and ratio > 0.25\n xtol_satisfied = dx_norm < xtol * (xtol + x_norm)\n if ftol_satisfied and xtol_satisfied:\n return 4\n elif ftol_satisfied:\n return 2\n elif xtol_satisfied:\n return 3\n else:\n return None", + "docstring": "Check termination condition for nonlinear least squares.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:check_termination arg:dF arg:F arg:dx_norm arg:x_norm arg:ratio arg:ftol arg:xtol arguments arg arg arg arg arg arg arg Assign BoolOp Compare Compare Assign Compare If BoolOp Return return:yes If Return return:yes If Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "_prefix_output_keys", + "source_code": "def _prefix_output_keys(self, output_dict, output_name):\n new_outputs = {}\n for key, val in output_dict.items():\n key = self._prefix_key(key, output_name)\n new_outputs[key] = val\n return new_outputs", + "docstring": "Prepend output_name to the output_dict keys if it doesn't exist. This produces predictable prefixes for the pre-determined outputs of SupervisedOutput. Args: output_dict: dict of string to Tensor, assumed valid. output_name: prefix string to prepend to existing keys. 
Returns: dict with updated keys and existing values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py", + "ast_data": "FunctionDef name:_prefix_output_keys arg:self arg:output_dict arg:output_name arguments arg arg arg Assign For Call Assign Call Assign Return return:yes" + }, + { + "library": "django", + "name": "SpatialiteGeometryColumns", + "source_code": "class SpatialiteGeometryColumns(models.Model):\n f_table_name = models.CharField(max_length=256)\n f_geometry_column = models.CharField(max_length=256)\n coord_dimension = models.IntegerField()\n srid = models.IntegerField(primary_key=True)\n spatial_index_enabled = models.IntegerField()\n type = models.IntegerField(db_column='geometry_type')\n\n class Meta:\n app_label = 'gis'\n db_table = 'geometry_columns'\n managed = False\n\n def __str__(self):\n return '%s.%s - %dD %s field (SRID: %d)' % (self.f_table_name, self.f_geometry_column, self.coord_dimension, self.type, self.srid)\n\n @classmethod\n def table_name_col(cls):\n return 'f_table_name'\n\n @classmethod\n def geom_col_name(cls):\n return 'f_geometry_column'", + "docstring": "The 'geometry_columns' table from SpatiaLite.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\models.py", + "ast_data": "ClassDef name:SpatialiteGeometryColumns Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call ClassDef name:Meta Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:table_name_col arg:cls arguments arg Return return:yes FunctionDef name:geom_col_name arg:cls arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "ragged_one_hot", + "source_code": "@dispatch.dispatch_for_api(array_ops.one_hot)\ndef ragged_one_hot(indices: ragged_tensor.Ragged, depth, on_value=None, off_value=None, axis=None, dtype=None, name=None):\n if isinstance(axis, int) and axis >= 0:\n if axis <= indices.ragged_rank:\n raise ValueError('axis (%d) must be greater than indices.ragged_rank (%d).' % (axis, indices.ragged_rank))\n axis -= indices.ragged_rank\n with ops.name_scope(name, 'RaggedOneHot', [indices, depth, on_value, off_value, axis]):\n indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(indices, name='indices')\n return indices.with_flat_values(array_ops.one_hot(indices.flat_values, depth, on_value, off_value, axis, dtype, name))", + "docstring": "Applies tf.one_hot along the values of a RaggedTensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", + "ast_data": "FunctionDef name:ragged_one_hot arg:indices arg:depth arg:on_value arg:off_value arg:axis arg:dtype arg:name arguments arg arg arg arg arg arg arg If BoolOp Call Compare If Compare Raise Call With Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "start_queue_runners", + "source_code": "def start_queue_runners(self, sess, queue_runners=None):\n if context.executing_eagerly():\n raise RuntimeError('Queues are not compatible with eager execution.')\n if queue_runners is None:\n queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)\n threads = []\n for qr in queue_runners:\n threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True, start=True))\n return threads", + "docstring": "Start threads for . 
Note that the queue runners collected in the graph key are already started automatically when you create a session with the supervisor, so unless you have non-collected queue runners to start you do not need to call this explicitly. Args: sess: A . queue_runners: A list of . If not specified, we'll use the list of queue runners gathered in the graph under the key . Returns: The list of threads started for the . Raises: RuntimeError: If called with eager execution enabled. @compatibility(eager) Queues are not compatible with eager execution. To ingest data when eager execution is enabled, use the API. @end_compatibility", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "FunctionDef name:start_queue_runners arg:self arg:sess arg:queue_runners arguments arg arg arg If Call Raise Call If Compare Assign Call Assign For Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_IntWrapper", + "source_code": "@dataclasses.dataclass\nclass _IntWrapper:\n val: int\n dynamism: Optional[Union[_DimHint, int]] = dataclasses.field(init=False, default=None)", + "docstring": "Dummy wrapper class to wrap around integer inputs so that when we parse the dynamic_shapes structure, we can mark if any of the integers were marked as dynamic.", + "type": "class", + "file_path": "pytorch\\torch\\export\\dynamic_shapes.py", + "ast_data": "ClassDef name:_IntWrapper Call" + }, + { + "library": "tensorflow", + "name": "np_reshape", + "source_code": "def np_reshape(a, /, shape=None, *, newshape=None, order='C', copy=None):\n if shape is None:\n shape = newshape\n if np.lib.NumpyVersion(np.__version__) >= '2.1.0.rc0':\n if shape is None and newshape is None:\n return np.asarray(a, order=order, copy=copy)\n return np.reshape(a, shape, order=order, copy=copy)\n return np.reshape(a, shape, order=order)", + "docstring": "Reshapes an array without changing its data. NumPy 2.1.0rc1 added shape and copy arguments to numpy.reshape. See Both newshape and shape keywords are supported, but newshape is going to be deprecated. Use instead. Besides, shape cannot be None now. See Previously, np.reshape with newshape=None returned a copy. To maintain this behavior, we now use asarray to create an ndarray. Args: a: Array_like. Array to be reshaped. shape: The new shape of the array. newshape: The new shape of the array (deprecated). order: {‘C’, ‘F’, ‘K’}. copy: bool. If True, then the array data is copied. If None, a copy will only be made if it’s required by order. For False it raises a ValueError if a copy cannot be avoided. 
Returns: This will be a new view object if possible; otherwise, it will be a copy.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\numpy_compat.py", + "ast_data": "FunctionDef name:np_reshape arg:shape arguments arg arg arg arg arg If Compare Assign If Compare Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n if context.executing_eagerly():\n return self._name\n return self._barrier_ref.op.name", + "docstring": "The name of the underlying barrier.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:name arg:self arguments arg If Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "__getitem__", + "source_code": "def __getitem__(self, item):\n if not self._isinit:\n self._init()\n if item == 0:\n origin_1_as_int = int(self._origin[1] * self.M)\n if origin_1_as_int > self.M - 1:\n origin_1_as_int = self.M - 1\n one_d_lut = self._lut[:, origin_1_as_int]\n new_cmap = ListedColormap(one_d_lut, name=f'{self.name}_0')\n elif item == 1:\n origin_0_as_int = int(self._origin[0] * self.N)\n if origin_0_as_int > self.N - 1:\n origin_0_as_int = self.N - 1\n one_d_lut = self._lut[origin_0_as_int, :]\n new_cmap = ListedColormap(one_d_lut, name=f'{self.name}_1')\n else:\n raise KeyError(f'only 0 or 1 are valid keys for BivarColormap, not {item!r}')\n new_cmap._rgba_bad = self._rgba_bad\n if self.shape in ['ignore', 'circleignore']:\n new_cmap.set_over(self._rgba_outside)\n new_cmap.set_under(self._rgba_outside)\n return new_cmap", + "docstring": "Creates and returns a colorbar along the selected axis", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:item arguments arg arg If Call If Compare Assign Call If Compare Assign Assign Assign Call If Compare Assign Call If Compare Assign Assign Assign Call Raise Call Assign If Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_diag", + "source_code": "def _get_diag(self):\n return array_ops.matrix_diag_part(self._tril)", + "docstring": "Gets the diagonal part of kwarg.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_lower_triangular.py", + "ast_data": "FunctionDef name:_get_diag arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "commands", + "source_code": "def commands(self) -> List[List[str]]:\n cmds = []\n cmds.extend(self.extra_setup_commands)\n macos_build = self.type_ == BuildType.XLA_MACOS_X86_CPU_KOKORO or self.type_ == BuildType.XLA_MACOS_ARM64_CPU_KOKORO\n if not macos_build:\n cmds.append(retry(self.bazel_command(subcommand='build', extra_options=('--nobuild',))))\n cmds.append(self.bazel_command(subcommand=self.subcommand))\n cmds.append(['bazel', 'analyze-profile', 'profile.json.gz'])\n return cmds", + "docstring": "Returns list of commands for a build.", + "type": "method", + "file_path": "tensorflow\\third_party\\xla\\build_tools\\ci\\build.py", + "ast_data": "FunctionDef name:commands arg:self arguments arg Assign Call Assign BoolOp Compare Compare If Call Call Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_propagate_module_bias", + "source_code": "def _propagate_module_bias(module: nn.Module, mask: Tensor) -> 
Optional[Tensor]:\n if module.bias is not None:\n module.bias = nn.Parameter(cast(Tensor, module.bias)[mask])\n elif getattr(module, '_bias', None) is not None:\n module.bias = nn.Parameter(cast(Tensor, module._bias)[mask])\n if getattr(module, '_bias', None) is not None:\n pruned_biases = cast(Tensor, module._bias)[~mask]\n else:\n pruned_biases = None\n if hasattr(module, '_bias'):\n delattr(module, '_bias')\n return pruned_biases", + "docstring": "In the case that we need to propagate biases, this function will return the biases we need", + "type": "function", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\prune_functions.py", + "ast_data": "FunctionDef name:_propagate_module_bias arg:module arg:mask arguments arg arg If Compare Assign Call Call If Compare Call Assign Call Call If Compare Call Assign Call Assign If Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "record", + "source_code": "def record(self, flat_outputs, inference_args, input_tangents):\n backward_function, to_record = self._backward(flat_outputs)\n record.record_operation(self._inference_function.cached_definition.signature.name, to_record, inference_args + input_tangents, backward_function)", + "docstring": "Record the function call operation. _DelayedRewriteGradientFunctions supports only first-order backprop tape gradients (and then only when graph building). It does not work with higher-order tape gradients or forward autodiff, but does work with higher-order symbolic gradients (tf.gradients). Args: flat_outputs: The result of running . inference_args: A flat list of Tensors with inference inputs to the operation. input_tangents: A flat list of Tensors with input tangents consumed by the operation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:record arg:self arg:flat_outputs arg:inference_args arg:input_tangents arguments arg arg arg arg Assign Call Call" + }, + { + "library": "pytorch", + "name": "create_pytorch_op_test_case", + "source_code": "def create_pytorch_op_test_case(op_bench, test_config):\n test_case = PyTorchOperatorTestCase(op_bench, test_config)\n test_config = test_case.test_config\n op = test_case.op_bench\n func_name = f'{op.module_name()}{test_case.framework}{str(test_config)}'\n return (func_name, test_case)", + "docstring": "This method is used to generate est. func_name is a global unique string. 
For PyTorch add operator with M=8, N=2, K=1, tag = long, here are the values for the members in test_case: op.module_name: add framework: PyTorch test_config: TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1', tag='long', run_backward=False) func_name: addPyTorchTestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1', tag='long', run_backward=False)", + "type": "function", + "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py", + "ast_data": "FunctionDef name:create_pytorch_op_test_case arg:op_bench arg:test_config arguments arg arg Assign Call Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "should_trigger_for_step", + "source_code": "def should_trigger_for_step(self, step):\n raise NotImplementedError", + "docstring": "Return true if the timer should trigger for the specified step.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", + "ast_data": "FunctionDef name:should_trigger_for_step arg:self arg:step arguments arg arg Raise" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs):\n super().__init__(reduction=reduction, name=name)\n self.fn = fn\n self._fn_kwargs = kwargs", + "docstring": "Initializes class. Args: fn: The loss function to wrap, with signature . reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. **kwargs: The keyword arguments that are passed on to .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:fn arg:reduction arg:name arguments arg arg arg arg arg Call Call Assign Assign" + }, + { + "library": "scipy", + "name": "fftfreq", + "source_code": "def fftfreq(n, d=1.0, *, xp=None, device=None):\n xp = np if xp is None else xp\n if hasattr(xp, 'fft') and xp.__name__ != 'numpy':\n return xp.fft.fftfreq(n, d=d, device=device)\n if device is not None:\n raise ValueError('device parameter is not supported for input array type')\n return np.fft.fftfreq(n, d=d)", + "docstring": "Return the Discrete Fourier Transform sample frequencies. The returned float array contains the frequency bin centers in cycles per unit of the sample spacing (with zero at the start). For instance, if the sample spacing is in seconds, then the frequency unit is cycles/second. Given a window length and a sample spacing :: f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd Parameters ---------- n : int Window length. d : scalar, optional Sample spacing (inverse of the sampling rate). Defaults to 1. xp : array_namespace, optional The namespace for the return array. Default is None, where NumPy is used. device : device, optional The device for the return array. Only valid when implements the device parameter. Returns ------- f : ndarray Array of length containing the sample frequencies. 
Examples -------- >>> import numpy as np >>> import scipy.fft >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) >>> fourier = scipy.fft.fft(signal) >>> n = signal.size >>> timestep = 0.1 >>> freq = scipy.fft.fftfreq(n, d=timestep) >>> freq array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25])", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_helper.py", + "ast_data": "FunctionDef name:fftfreq arg:n arg:d arguments arg arg arg arg Assign Compare If BoolOp Call Compare Return return:yes Call If Compare Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "new_parameter_placeholder", + "source_code": "def new_parameter_placeholder(size: tuple[int, ...], dtype: torch.dtype, device: torch.device, requires_grad: bool) -> torch.nn.Parameter:\n result = torch.nn.Parameter(torch.empty(size, dtype=dtype, device=device), requires_grad=requires_grad)\n result.untyped_storage().resize_(0)\n return result", + "docstring": "Create a placeholder to be passed to the above functions", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\create_parameter_op.py", + "ast_data": "FunctionDef name:new_parameter_placeholder arg:size arg:dtype arg:device arg:requires_grad arguments arg arg arg arg Assign Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_extend", + "source_code": "def _extend(M, sym):\n if not sym:\n return (M + 1, True)\n else:\n return (M, False)", + "docstring": "Extend window by 1 sample if needed for DFT-even symmetry", + "type": "function", + "file_path": "scipy\\scipy\\signal\\windows\\_windows.py", + "ast_data": "FunctionDef name:_extend arg:M arg:sym arguments arg arg If Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "distance_matrix", + "source_code": "def distance_matrix(x, y, p=2, threshold=1000000):\n x = np.asarray(x)\n m, k = x.shape\n y = np.asarray(y)\n n, kk = y.shape\n if k != kk:\n raise ValueError(f'x contains {k}-dimensional vectors but y contains {kk}-dimensional vectors')\n if m * n * k <= threshold:\n return minkowski_distance(x[:, np.newaxis, :], y[np.newaxis, :, :], p)\n else:\n result = np.empty((m, n), dtype=float)\n if m < n:\n for i in range(m):\n result[i, :] = minkowski_distance(x[i], y, p)\n else:\n for j in range(n):\n result[:, j] = minkowski_distance(x, y[j], p)\n return result", + "docstring": "Compute the distance matrix. Returns the matrix of all pair-wise distances. Parameters ---------- x : (M, K) array_like Matrix of M vectors in K dimensions. y : (N, K) array_like Matrix of N vectors in K dimensions. p : float, 1 , algorithm uses a Python loop instead of large temporary arrays. Returns ------- result : (M, N) ndarray Matrix containing the distance from every vector in to every vector in . Examples -------- >>> from scipy.spatial import distance_matrix >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]]) array([[ 1. , 1.41421356], [ 1.41421356, 1. 
]])", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\_kdtree.py", + "ast_data": "FunctionDef name:distance_matrix arg:x arg:y arg:p arg:threshold arguments arg arg arg arg Assign Call Assign Assign Call Assign If Compare Raise Call If Compare Return return:yes Call Assign Call If Compare For Call Assign Call For Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "OpsSet", + "source_code": "@_tf_export('lite.OpsSet')\nclass OpsSet(enum.Enum):\n TFLITE_BUILTINS = 'TFLITE_BUILTINS'\n SELECT_TF_OPS = 'SELECT_TF_OPS'\n TFLITE_BUILTINS_INT8 = 'TFLITE_BUILTINS_INT8'\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = 'EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8'\n EXPERIMENTAL_STABLEHLO_OPS = 'EXPERIMENTAL_STABLEHLO_OPS'\n\n def __str__(self):\n return str(self.value)\n\n @staticmethod\n def get_options():\n return [str(option) for option in list(OpsSet)]", + "docstring": "Enum class defining the sets of ops available to generate TFLite models. WARNING: Experimental interface, subject to change.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py", + "ast_data": "ClassDef name:OpsSet Assign Assign Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:get_options arguments Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_VariantDataset", + "source_code": "class _VariantDataset(DatasetV2):\n\n def __init__(self, dataset_variant, element_spec):\n self._element_spec = element_spec\n super(_VariantDataset, self).__init__(dataset_variant)\n\n def _inputs(self):\n return []\n\n @property\n def element_spec(self):\n return self._element_spec", + "docstring": "A Dataset wrapper around a -typed function argument.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "ClassDef name:_VariantDataset FunctionDef name:__init__ arg:self arg:dataset_variant arg:element_spec arguments arg arg arg Assign Call Call FunctionDef name:_inputs arg:self arguments arg Return return:no FunctionDef name:element_spec arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "__add__", + "source_code": "def __add__(self, other):\n if not self._check_binop_other(other):\n return NotImplemented\n if isinstance(other, StateSpace):\n if type(other) is not type(self):\n raise TypeError(f'Cannot add {type(self)} and {type(other)}')\n if self.dt != other.dt:\n raise TypeError('Cannot add systems with different `dt`.')\n a = linalg.block_diag(self.A, other.A)\n b = np.vstack((self.B, other.B))\n c = np.hstack((self.C, other.C))\n d = self.D + other.D\n else:\n other = np.atleast_2d(other)\n if self.D.shape == other.shape:\n a = self.A\n b = self.B\n c = self.C\n d = self.D + other\n else:\n raise ValueError(f'Cannot add systems with incompatible dimensions ({self.D.shape} and {other.shape})')\n common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)\n return StateSpace(np.asarray(a, dtype=common_dtype), np.asarray(b, dtype=common_dtype), np.asarray(c, dtype=common_dtype), np.asarray(d, dtype=common_dtype), **self._dt_dict)", + "docstring": "Adds two systems in the sense of frequency domain addition.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:__add__ arg:self arg:other arguments arg arg If Call Return return:yes If Call If Compare Call Call Raise Call Call Call If Compare Raise Call Assign 
Call Assign Call Assign Call Assign Assign Call If Compare Assign Assign Assign Assign Raise Call Assign Call Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_GetGradWrt", + "source_code": "def _GetGradWrt(output_grad, other_operand, input_shape, input_subs, other_subs, output_subs):\n reduced_label_set = set(input_subs).difference(set(output_subs + other_subs + '.'))\n left_subs = ''.join((s for s in input_subs if s not in reduced_label_set))\n grad_reduced = gen_linalg_ops.einsum([output_grad, other_operand], '{},{}->{}'.format(output_subs, other_subs, left_subs))\n if not reduced_label_set:\n return grad_reduced\n return _GetGradReduced(grad_reduced, left_subs, input_subs, input_shape, reduced_label_set)", + "docstring": "Returns the gradient wrt an input operand for a binary einsum. This function does not handle (un)broadcasting. This must be done separately on the returned gradient. Args: output_grad: The gradient wrt the output of a binary einsum operation. other_operand: The complementary operand i.e. which is not the input operand. input_shape: A representing the shape of input operand. input_subs: The subscripts of the input operand. other_subs: The subscripts of the complementary operand. output_subs: The output subscripts.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_GetGradWrt arg:output_grad arg:other_operand arg:input_shape arg:input_subs arg:other_subs arg:output_subs arguments arg arg arg arg arg arg Assign Call Call Call Assign Call Compare Assign Call Call If Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "graph", + "source_code": "@property\ndef graph(self):\n return self._func_graph", + "docstring": "Returns the graph from which this function was constructed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "ELU", + "source_code": "class ELU(Layer):\n\n def __init__(self, alpha=1.0, **kwargs):\n super(ELU, self).__init__(**kwargs)\n if alpha is None:\n raise ValueError('Alpha of an ELU layer cannot be None, requires a float. Got %s' % alpha)\n self.supports_masking = True\n self.alpha = backend.cast_to_floatx(alpha)\n\n def call(self, inputs):\n return backend.elu(inputs, self.alpha)\n\n def get_config(self):\n config = {'alpha': float(self.alpha)}\n base_config = super(ELU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape", + "docstring": "Exponential Linear Unit. It follows: Input shape: Arbitrary. Use the keyword argument (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. 
Args: alpha: Scale for the negative factor.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\advanced_activations.py", + "ast_data": "ClassDef name:ELU FunctionDef name:__init__ arg:self arg:alpha arguments arg arg arg Call Call If Compare Raise Call Assign Assign Call FunctionDef name:call arg:self arg:inputs arguments arg arg Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign Call Assign Call Call Return return:yes Call Call Call Call Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Return return:yes" + }, + { + "library": "django", + "name": "limit_offset_sql", + "source_code": "def limit_offset_sql(self, low_mark, high_mark):\n limit, offset = self._get_limit_offset_params(low_mark, high_mark)\n return ' '.join((sql for sql in ('LIMIT %d' % limit if limit else None, 'OFFSET %d' % offset if offset else None) if sql))", + "docstring": "Return LIMIT/OFFSET SQL clause.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:limit_offset_sql arg:self arg:low_mark arg:high_mark arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "_md5sum", + "source_code": "def _md5sum(file: IO[bytes]) -> str:\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()", + "docstring": "Calculate the md5 checksum of a file-like object without reading its whole content in memory. >>> from io import BytesIO >>> _md5sum(BytesIO(b'file content to hash')) '784406af91dd5a54fbb9c84c2236595a'", + "type": "function", + "file_path": "scrapy\\scrapy\\pipelines\\files.py", + "ast_data": "FunctionDef name:_md5sum arg:file arguments arg Assign Call While Assign Call If Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_display_summary", + "source_code": "def _display_summary(message, status, fun, iteration):\n print(message)\n if status in (0, 1):\n print(f' Current function value: {fun: <12.6f}')\n print(f' Iterations: {iteration:d}')", + "docstring": "Print the termination summary of the linear program Parameters ---------- message : str A string descriptor of the exit status of the optimization. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered fun : float Value of the objective function. iteration : iteration The number of iterations performed.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_linprog_util.py", + "ast_data": "FunctionDef name:_display_summary arg:message arg:status arg:fun arg:iteration arguments arg arg arg arg Call If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "sum", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef sum(x, axis=None, keepdims=False):\n return math_ops.reduce_sum(x, axis, keepdims)", + "docstring": "Sum of the values in a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: An integer, the axis to sum over. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. 
Returns: A tensor with sum of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:sum arg:x arg:axis arg:keepdims arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "random_shuffle", + "source_code": "@dispatch.dispatch_for_types(random_ops.random_shuffle, StructuredTensor)\ndef random_shuffle(value, seed=None, name=None):\n with ops.name_scope(name, 'shuffle', [value, seed]):\n if value.rank == 0:\n raise ValueError('Cannot shuffle a scalar StructuredTensor')\n first_dimension = value.nrows()\n index = random_ops.random_shuffle(math_ops.range(first_dimension), seed=seed)\n return gather(value, index, axis=0)", + "docstring": "Shuffle a structured tensor on the zeroth axis. Args: value: a structured tensor of rank at least one. seed: the seed for shuffling. name: the name for shuffle. Returns: The shuffled structured tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:random_shuffle arg:value arg:seed arg:name arguments arg arg arg With Call If Compare Raise Call Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_get_thread_name", + "source_code": "def _get_thread_name() -> str:\n return torch._C._get_thread_name()", + "docstring": "Get the name of the current thread. Returns: str: Name of the current thread.", + "type": "function", + "file_path": "pytorch\\torch\\multiprocessing\\__init__.py", + "ast_data": "FunctionDef name:_get_thread_name arguments Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "node_name", + "source_code": "@property\ndef node_name(self):\n return self._node_name", + "docstring": "Name of the node from which the tensor value was dumped. Returns: () name of the node watched by the debug op.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:node_name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "reopen", + "source_code": "def reopen(self):\n self.event_writer.reopen()\n self._closed = False", + "docstring": "Reopens the EventFileWriter. Can be called after to add more events in the same directory. The events will go into a new events file. Does nothing if the EventFileWriter was not closed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py", + "ast_data": "FunctionDef name:reopen arg:self arguments arg Call Assign" + }, + { + "library": "pandas", + "name": "count", + "source_code": "def count(self) -> int:\n return notna(self._values).sum().astype('int64')", + "docstring": "Return number of non-NA/null observations in the Series. Returns ------- int Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. 
Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:count arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "serialize_sparse_v2", + "source_code": "@tf_export('io.serialize_sparse', v1=[])\n@dispatch.add_dispatch_support\ndef serialize_sparse_v2(sp_input, out_type=dtypes.string, name=None):\n sp_input = _convert_to_sparse_tensor(sp_input)\n return gen_sparse_ops.serialize_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type)", + "docstring": "Serialize a into a 3-vector (1-D ) object. Args: sp_input: The input . out_type: The to use for serialization. name: A name prefix for the returned tensors (optional). Returns: A 3-vector (1-D ), with each column representing the serialized 's indices, values, and shape (respectively). Raises: TypeError: If is not a .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:serialize_sparse_v2 arg:sp_input arg:out_type arg:name arguments arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n y = (self.decision_function(X) >= 0).astype(np.int32)\n y[y == 0] = -1\n return y", + "docstring": "Return labels (1 inlier, -1 outlier) of the samples. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Testing data. Returns ------- y : array, shape (n_samples,) Labels of the samples.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Compare Call Assign Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "cholesky", + "source_code": "def cholesky(self, name: str='cholesky') -> 'LinearOperator':\n if not self._can_use_cholesky():\n raise ValueError('Cannot take the Cholesky decomposition: Not a positive definite self adjoint matrix.')\n with self._name_scope(name):\n return self._linop_cholesky()", + "docstring": "Returns a Cholesky factor as a . Given representing this , if is positive definite self-adjoint, return , where , i.e. the cholesky decomposition. Args: name: A name for this . Returns: which represents the lower triangular matrix in the Cholesky decomposition. 
Raises: ValueError: When the is not hinted to be positive definite and self adjoint.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:cholesky arg:self arg:name arguments arg arg If Call Raise Call With Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "SymIntSymbolicContext", + "source_code": "@dataclass(frozen=True)\nclass SymIntSymbolicContext(SymbolicContext):\n constraint: DimConstraint", + "docstring": "Data structure specifying any constraints on a SymInt input", + "type": "class", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "ClassDef name:SymIntSymbolicContext Call" + }, + { + "library": "numpy", + "name": "atleast_3d", + "source_code": "@array_function_dispatch(_atleast_3d_dispatcher)\ndef atleast_3d(*arys):\n res = []\n for ary in arys:\n ary = asanyarray(ary)\n if ary.ndim == 0:\n result = ary.reshape(1, 1, 1)\n elif ary.ndim == 1:\n result = ary[_nx.newaxis, :, _nx.newaxis]\n elif ary.ndim == 2:\n result = ary[:, :, _nx.newaxis]\n else:\n result = ary\n res.append(result)\n if len(res) == 1:\n return res[0]\n else:\n return tuple(res)", + "docstring": "View inputs as arrays with at least three dimensions. Parameters ---------- arys1, arys2, ... : array_like One or more array-like sequences. Non-array inputs are converted to arrays. Arrays that already have three or more dimensions are preserved. Returns ------- res1, res2, ... : ndarray An array, or tuple of arrays, each with ``. See Also -------- atleast_1d, atleast_2d Examples -------- >>> import numpy as np >>> np.atleast_3d(3.0) array([[[3.]]]) >>> x = np.arange(3.0) >>> np.atleast_3d(x).shape (1, 3, 1) >>> x = np.arange(12.0).reshape(4,3) >>> np.atleast_3d(x).shape (4, 3, 1) >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself True >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): ... print(arr, arr.shape) # doctest: +SKIP ... 
[[[1] [2]]] (1, 2, 1) [[[1] [2]]] (1, 2, 1) [[[1 2]]] (1, 1, 2)", + "type": "function", + "file_path": "numpy\\numpy\\_core\\shape_base.py", + "ast_data": "FunctionDef name:atleast_3d arguments arg Assign For Assign Call If Compare Assign Call If Compare Assign If Compare Assign Assign Call If Compare Call Return return:yes Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "is_venv", + "source_code": "def is_venv(self) -> bool:\n return self.prefix.is_dir() and (self.prefix / 'pyvenv.cfg').is_file()", + "docstring": "Check if the prefix is a virtual environment.", + "type": "method", + "file_path": "pytorch\\tools\\nightly.py", + "ast_data": "FunctionDef name:is_venv arg:self arguments arg Return return:yes BoolOp Call Call" + }, + { + "library": "django", + "name": "FileDescriptor", + "source_code": "class FileDescriptor(DeferredAttribute):\n\n def __get__(self, instance, cls=None):\n if instance is None:\n return self\n file = super().__get__(instance, cls)\n if isinstance(file, str) or file is None:\n attr = self.field.attr_class(instance, self.field, file)\n instance.__dict__[self.field.attname] = attr\n elif isinstance(file, DatabaseDefault):\n attr = self.field.attr_class(instance, self.field, self.field.db_default)\n instance.__dict__[self.field.attname] = attr\n elif isinstance(file, File) and (not isinstance(file, FieldFile)):\n file_copy = self.field.attr_class(instance, self.field, file.name)\n file_copy.file = file\n file_copy._committed = False\n instance.__dict__[self.field.attname] = file_copy\n elif isinstance(file, FieldFile) and (not hasattr(file, 'field')):\n file.instance = instance\n file.field = self.field\n file.storage = self.field.storage\n elif isinstance(file, FieldFile) and instance is not file.instance:\n file.instance = instance\n return instance.__dict__[self.field.attname]\n\n def __set__(self, instance, value):\n instance.__dict__[self.field.attname] = value", + "docstring": "The descriptor for the file attribute on the model instance. Return a FieldFile when accessed so you can write code like:: >>> from myapp.models import MyModel >>> instance = MyModel.objects.get(pk=1) >>> instance.file.size Assign a file object on assignment so you can do:: >>> with open('/path/to/hello.world') as f: ... 
instance.file = File(f)", + "type": "class", + "file_path": "django\\django\\db\\models\\fields\\files.py", + "ast_data": "ClassDef name:FileDescriptor FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Assign Call Call If BoolOp Call Compare Assign Call Assign If Call Assign Call Assign If BoolOp Call Call Assign Call Assign Assign Assign If BoolOp Call Call Assign Assign Assign If BoolOp Call Compare Assign Return return:yes FunctionDef name:__set__ arg:self arg:instance arg:value arguments arg arg arg Assign" + }, + { + "library": "kornia", + "name": "make_samplers", + "source_code": "def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n gain = _range_bound(self.gain, 'gain').to(device, dtype)\n self.gain_sampler = UniformDistribution(gain[0], gain[1], validate_args=False)\n sign = _range_bound(self.sign, 'sign', bounds=(-1.0, 1.0), center=0.0).to(device, dtype)\n self.sign_sampler = UniformDistribution(sign[0], sign[1], validate_args=False)\n self.directions_sampler = UniformDistribution(0, 4, validate_args=False)", + "docstring": "Create samplers for generating random gaussian illumination parameters.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\linear_illumination.py", + "ast_data": "FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Call Assign Call Assign Call Call Assign Call Assign Call" + }, + { + "library": "django", + "name": "_topology", + "source_code": "def _topology(self, gptr):\n return GEOSGeometry(gptr, srid=self.srid)", + "docstring": "Return Geometry from the given pointer.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:_topology arg:self arg:gptr arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y):\n self._fit_encodings_all(X, y)\n return self", + "docstring": "Fit the :class: to X and y. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : array-like of shape (n_samples,) The target data used to encode the categories. Returns ------- self : object Fitted encoder.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_target_encoder.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_unsafe_preserve_version_counter", + "source_code": "class _unsafe_preserve_version_counter(_DecoratorContextManager):\n\n def __init__(self, tensors: Union[torch.Tensor, tuple[torch.Tensor, ...]]) -> None:\n self.tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tensors\n assert isinstance(self.tensors, tuple)\n self.prev_versions = tuple((t._version for t in self.tensors))\n\n def __enter__(self) -> None:\n pass\n\n def __exit__(self, *args) -> None:\n torch._C._autograd._unsafe_set_version_counter(self.tensors, self.prev_versions)", + "docstring": "DO NOT USE THIS UNLESS YOU KNOW EXACTLY WHAT YOU'RE DOING. This context manager can lead to arbitrary silent-correctness issues in any other part of your code (even the ones not touched directly by the context manager)! Ordinarily, autograd will track mutations to tensors by incrementing it's attribute. 
This is generally important for correctness, as for example, mutating a tensor that autograd has saved for the backwards pass can result in incorrect gradients, and autograd uses the version counter to detect and error out in this situation. However, there are rare instances where it might be useful to hide mutations from autograd. For example: if a tensor is very large, and you'd like to free its memory by storing it elsewhere, and re-populate the tensor right before it is needed by autograd. Args: tensor (torch.Tensor): the tensor in question, that you would like to preserve the version counter of. .. note:: This API does not apply to :ref:.", + "type": "class", + "file_path": "pytorch\\torch\\autograd\\grad_mode.py", + "ast_data": "ClassDef name:_unsafe_preserve_version_counter FunctionDef name:__init__ arg:self arg:tensors arguments arg arg Assign Call Call Assign Call FunctionDef name:__enter__ arg:self arguments arg FunctionDef name:__exit__ arg:self arguments arg arg Call" + }, + { + "library": "django", + "name": "datetime_cast_time_sql", + "source_code": "def datetime_cast_time_sql(self, sql, params, tzname):\n raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method')", + "docstring": "Return the SQL to cast a datetime value to time value.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:datetime_cast_time_sql arg:self arg:sql arg:params arg:tzname arguments arg arg arg arg Raise Call" + }, + { + "library": "sphinx", + "name": "versionmodified", + "source_code": "class versionmodified(nodes.Admonition, nodes.TextElement):\n pass", + "docstring": "Node for version change entries. Currently used for \"versionadded\", \"versionchanged\", \"deprecated\" and \"versionremoved\" directives.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:versionmodified" + }, + { + "library": "scipy", + "name": "InvalidVersion", + "source_code": "class InvalidVersion(ValueError):\n pass", + "docstring": "An invalid version was found, users should refer to PEP 440.", + "type": "class", + "file_path": "scipy\\scipy\\_lib\\_pep440.py", + "ast_data": "ClassDef name:InvalidVersion" + }, + { + "library": "pandas", + "name": "_format_native_types", + "source_code": "def _format_native_types(self, *, na_rep: str | float='NaT', date_format=None) -> npt.NDArray[np.object_]:\n raise AbstractMethodError(self)", + "docstring": "Helper method for astype when converting to strings. Returns ------- ndarray[str]", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_format_native_types arg:self arguments arg arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "generate", + "source_code": "def generate(self, **kwargs: Any) -> ChoiceCaller:\n raise NotImplementedError", + "docstring": "Generates a ChoiceCaller instance from the given arguments.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py", + "ast_data": "FunctionDef name:generate arg:self arguments arg arg Raise" + }, + { + "library": "tensorflow", + "name": "GetColocationGroups", + "source_code": "def GetColocationGroups(self):\n return tf_item.TF_GetColocationGroups(self.tf_item)", + "docstring": "Return a list of hard colocation constraints. All the nodes in a colocation tuple must be placed on the same device for the model to work. 
Returns: A list of colocation tuples.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\grappler\\item.py", + "ast_data": "FunctionDef name:GetColocationGroups arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_lr", + "source_code": "@override\ndef get_lr(self) -> list[float]:\n _warn_get_lr_called_within_step(self)\n if self.last_epoch == 0:\n return [group['lr'] * self.factor for group in self.optimizer.param_groups]\n if self.last_epoch != self.total_iters:\n return [group['lr'] for group in self.optimizer.param_groups]\n return [group['lr'] * (1.0 / self.factor) for group in self.optimizer.param_groups]", + "docstring": "Compute the learning rate of each parameter group.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "SendTracebacks", + "source_code": "def SendTracebacks(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", + "docstring": "Send the tracebacks of ops in a Python graph definition.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_service_pb2_grpc.py", + "ast_data": "FunctionDef name:SendTracebacks arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call" + }, + { + "library": "numpy", + "name": "_iswritemode", + "source_code": "def _iswritemode(self, mode):\n _writemodes = ('w', '+')\n return any((c in _writemodes for c in mode))", + "docstring": "Test if the given mode will open a file for writing.", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_datasource.py", + "ast_data": "FunctionDef name:_iswritemode arg:self arg:mode arguments arg arg Assign Return return:yes Call Compare" + }, + { + "library": "pytorch", + "name": "main", + "source_code": "def main(self, log_path, other_datasets, nrows, heuristic_name, save_dot=False, ranking=False):\n df, choices, cat_feature2cats, dummy_col_2_col_val, metadata = self.get_df(log_path, nrows=nrows, apply_filters=True)\n df_train, df_val, df_test, feature_columns = self.custom_train_test_split(df)\n datasets = {'train': df_train, 'val': df_val, 'test': df_test}\n self.add_real_datasets(datasets, other_datasets, cat_feature2cats)\n max_depths = [5, 6, 7]\n min_samples_leafs = [1, 2, 5, 10]\n choice_columns = [f'{CHOICE_COL}_{choice}' for choice in choices]\n results_df, best_model, threshold = self.train_and_evaluate_models(datasets, feature_columns, choice_columns, max_depths, min_samples_leafs)\n print(results_df.to_string())\n for set_name in results_df['dataset'].unique():\n dataset_results = results_df[results_df['dataset'] == set_name]\n dataset_results = dataset_results.sort_values(by='correct')\n print(dataset_results.to_string() + '\\n')\n feature_names = feature_columns + choice_columns\n self.dt_to_python(best_model, metadata, feature_names, dummy_col_2_col_val, heuristic_name, threshold)", + "docstring": "Main function that trains a decision tree and generates a heuristic.", + "type": "method", + "file_path": "pytorch\\torchgen\\_autoheuristic\\train_regression.py", + "ast_data": "FunctionDef name:main arg:self arg:log_path arg:other_datasets arg:nrows arg:heuristic_name arg:save_dot arg:ranking arguments arg arg arg arg arg arg arg Assign Call 
Assign Call Assign Call Assign Assign Assign Assign Call Call Call For Call Assign Compare Assign Call Call Call Assign Call" + }, + { + "library": "scikit-learn", + "name": "requires_vector_input", + "source_code": "@property\ndef requires_vector_input(self):\n return self.k1.requires_vector_input or self.k2.requires_vector_input", + "docstring": "Returns whether the kernel is stationary.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:requires_vector_input arg:self arguments arg Return return:yes BoolOp" + }, + { + "library": "django", + "name": "spatialite_version_tuple", + "source_code": "def spatialite_version_tuple(self):\n version = self.spatialite_version()\n return (version, *get_version_tuple(version))", + "docstring": "Return the SpatiaLite version as a tuple (version string, major, minor, subminor).", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py", + "ast_data": "FunctionDef name:spatialite_version_tuple arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "polygrid3d", + "source_code": "def polygrid3d(x, y, z, c):\n return pu._gridnd(polyval, c, x, y, z)", + "docstring": "Evaluate a 3-D polynomial on the Cartesian product of x, y and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k where the points `axbyczxyzxyzxyzccxyzxyzcxy`. See Also -------- polyval, polyval2d, polygrid2d, polyval3d Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9)) >>> P.polygrid3d([0, 1], [0, 1], [0, 1], c) array([[ 1., 13.], [ 6., 51.]])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polynomial.py", + "ast_data": "FunctionDef name:polygrid3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "add_post_transform", + "source_code": "def add_post_transform(self, transform: type[Transform]) -> None:\n self.registry.add_post_transform(transform)", + "docstring": "Register a Docutils transform to be applied before writing. Add the standard docutils :class: subclass *transform* to the list of transforms that are applied before Sphinx writes a document. :param transform: A transform class", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:add_post_transform arg:self arg:transform arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "subtract", + "source_code": "def subtract(inputs, **kwargs):\n return Subtract(**kwargs)(inputs)", + "docstring": "Functional interface to the layer. Args: inputs: A list of input tensors (exactly 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the difference of the inputs. 
Examples:", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py", + "ast_data": "FunctionDef name:subtract arg:inputs arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, nhead, in_proj_container, attention_layer, out_proj):\n super().__init__()\n self.nhead = nhead\n self.in_proj_container = in_proj_container\n self.attention_layer = attention_layer\n self.out_proj = out_proj", + "docstring": "A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear). attention_layer: The attention layer. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). Examples:: >>> import torch >>> embed_dim, num_heads, bsz = 10, 5, 64 >>> in_proj_container = InProjContainer(torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim)) >>> MHA = MultiheadAttentionContainer(num_heads, in_proj_container, ScaledDotProduct(), torch.nn.Linear(embed_dim, embed_dim)) >>> query = torch.rand((21, bsz, embed_dim)) >>> key = value = torch.rand((16, bsz, embed_dim)) >>> attn_output, attn_weights = MHA(query, key, value) >>> print(attn_output.shape) >>> torch.Size([21, 64, 10])", + "type": "method", + "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:nhead arg:in_proj_container arg:attention_layer arg:out_proj arguments arg arg arg arg arg Call Call Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "supercedes", + "source_code": "def supercedes(a, b):\n if isvar(b) and (not isvar(a)):\n return True\n s = unify(a, b)\n if s is False:\n return False\n s = {k: v for k, v in s.items() if not isvar(k) or not isvar(v)}\n if reify(a, s) == a:\n return True\n if reify(b, s) == b:\n return False", + "docstring": "``", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\match.py", + "ast_data": "FunctionDef name:supercedes arg:a arg:b arguments arg arg If BoolOp Call Call Return return:yes Assign Call If Compare Return return:yes Assign Call BoolOp Call Call If Compare Call Return return:yes If Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "partial_run", + "source_code": "@deprecation.deprecated('2023-06-01', 'This function is deprecated and we do not expect adding newfunctionality to it. Please do not have your code dependingon this function.')\ndef partial_run(self, handle, fetches, feed_dict=None):\n return self._run(handle, fetches, feed_dict, None, None)", + "docstring": "Continues the execution with more feeds and fetches. NOTE: This function is deprecated and we do not expect adding new functionality to it. Please do not have your code depending on this function. This is EXPERIMENTAL and subject to change. To use partial execution, a user first calls and then a sequence of . specifies the list of feeds and fetches that will be used in the subsequent calls. The optional argument allows the caller to override the value of tensors in the graph. See run() for more information. Below is a simple example: Args: handle: A handle for a sequence of partial runs. fetches: A single graph element, a list of graph elements, or a dictionary whose values are graph elements or lists of graph elements (see documentation for ). 
feed_dict: A dictionary that maps graph elements to values (described above). Returns: Either a single value if is a single graph element, or a list of values if is a list, or a dictionary with the same keys as if that is a dictionary (see documentation for ). Raises: tf.errors.OpError: Or one of its subclasses on error.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:partial_run arg:self arg:handle arg:fetches arg:feed_dict arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "splitext", + "source_code": "def splitext(self, the_path):\n base, ext = posixpath.splitext(the_path)\n if base.lower().endswith('.tar'):\n ext = base[-4:] + ext\n base = base[:-4]\n return (base, ext)", + "docstring": "Like os.path.splitext, but takes off .tar, too", + "type": "method", + "file_path": "django\\django\\core\\management\\templates.py", + "ast_data": "FunctionDef name:splitext arg:self arg:the_path arguments arg arg Assign Call If Call Call Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "_check_autocomplete_fields", + "source_code": "def _check_autocomplete_fields(self, obj):\n if not isinstance(obj.autocomplete_fields, (list, tuple)):\n return must_be('a list or tuple', option='autocomplete_fields', obj=obj, id='admin.E036')\n else:\n return list(chain.from_iterable([self._check_autocomplete_fields_item(obj, field_name, 'autocomplete_fields[%d]' % index) for index, field_name in enumerate(obj.autocomplete_fields)]))", + "docstring": "Check that is a list or tuple of model fields.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_autocomplete_fields arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call" + }, + { + "library": "django", + "name": "convex_hull", + "source_code": "@property\ndef convex_hull(self):\n return self._geomgen(capi.geom_convex_hull)", + "docstring": "Return the smallest convex Polygon that contains all the points in this Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:convex_hull arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "uniform_", + "source_code": "def uniform_(tensor: Tensor, a: float=0.0, b: float=1.0, generator: _Optional[torch.Generator]=None) -> Tensor:\n if torch.overrides.has_torch_function_variadic(tensor):\n return torch.overrides.handle_torch_function(uniform_, (tensor,), tensor=tensor, a=a, b=b, generator=generator)\n return _no_grad_uniform_(tensor, a, b, generator)", + "docstring": "Fill the input Tensor with values drawn from the uniform distribution. :math:. 
Args: tensor: an n-dimensional a: the lower bound of the uniform distribution b: the upper bound of the uniform distribution generator: the torch Generator to sample from (default: None) Examples: >>> w = torch.empty(3, 5) >>> nn.init.uniform_(w)", + "type": "function", + "file_path": "pytorch\\torch\\nn\\init.py", + "ast_data": "FunctionDef name:uniform_ arg:tensor arg:a arg:b arg:generator arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_validate_X_predict", + "source_code": "def _validate_X_predict(self, X):\n check_is_fitted(self)\n if self.estimators_[0]._support_missing_values(X):\n ensure_all_finite = 'allow-nan'\n else:\n ensure_all_finite = True\n X = validate_data(self, X, dtype=DTYPE, accept_sparse='csr', reset=False, ensure_all_finite=ensure_all_finite)\n if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):\n raise ValueError('No support for np.int64 index based sparse matrices')\n return X", + "docstring": "Validate X whenever one tries to predict, apply, predict_proba.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py", + "ast_data": "FunctionDef name:_validate_X_predict arg:self arg:X arguments arg arg Call If Call Assign Assign Assign Call If BoolOp Call BoolOp Compare Compare Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, offset=(0.0, 0.0)):\n self._offset = offset", + "docstring": "Parameters ---------- offset : (float, float), default: (0, 0) The (x, y) offset to apply to the path, measured in points.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:offset arguments arg arg Assign" + }, + { + "library": "pytorch", + "name": "TanhTransform", + "source_code": "class TanhTransform(Transform):\n domain = constraints.real\n codomain = constraints.interval(-1.0, 1.0)\n bijective = True\n sign = +1\n\n def __eq__(self, other):\n return isinstance(other, TanhTransform)\n\n def _call(self, x):\n return x.tanh()\n\n def _inverse(self, y):\n return torch.atanh(y)\n\n def log_abs_det_jacobian(self, x, y):\n return 2.0 * (math.log(2.0) - x - softplus(-2.0 * x))", + "docstring": "Transform via the mapping :math:. It is equivalent to .. code-block:: python ComposeTransform( [ AffineTransform(0.0, 2.0), SigmoidTransform(), AffineTransform(-1.0, 2.0), ] ) However this might not be numerically stable, thus it is recommended to use instead. 
Note that one should use when it comes to values.", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\transforms.py", + "ast_data": "ClassDef name:TanhTransform Assign Assign Call Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Return return:yes Call FunctionDef name:log_abs_det_jacobian arg:self arg:x arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "set_level", + "source_code": "def set_level(request, level):\n if not hasattr(request, '_messages'):\n return False\n request._messages.level = level\n return True", + "docstring": "Set the minimum level of messages to be recorded, and return ``, use the default level (see the get_level() function).", + "type": "function", + "file_path": "django\\django\\contrib\\messages\\api.py", + "ast_data": "FunctionDef name:set_level arg:request arg:level arguments arg arg If Call Return return:yes Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, *args, **kwargs):\n\n def replica_local_fn(*args, **kwargs):\n if any((isinstance(arg, keras_tensor.KerasTensor) for arg in nest.flatten((args, kwargs)))):\n update_op = None\n else:\n update_op = self.update_state(*args, **kwargs)\n update_ops = []\n if update_op is not None:\n update_ops.append(update_op)\n with ops.control_dependencies(update_ops):\n result_t = self.result()\n result_t._metric_obj = self\n return result_t\n from tensorflow.python.keras.distribute import distributed_training_utils\n return distributed_training_utils.call_replica_local_fn(replica_local_fn, *args, **kwargs)", + "docstring": "Accumulates statistics and then computes metric result value. Args: *args: **kwargs: A mini-batch of inputs to the Metric, passed on to . Returns: The metric value tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg FunctionDef name:replica_local_fn arguments arg arg If Call Call Call Assign Assign Call Assign If Compare Call With Call Assign Call Assign Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "capture_by_value", + "source_code": "def capture_by_value(self, graph: Any, tensor: core.Tensor, name: Optional[str]=None) -> core.Tensor:\n if isinstance(tensor, core.Value):\n if name is None:\n name = str(pywrap_tfe.TFE_Py_UID())\n if tensor.dtype in dtypes.TF_VALUE_DTYPES and functools.reduce(lambda a, b: a * b, tensor.shape, 1) <= _EAGER_CONST_THRESHOLD:\n graph_const = self.by_val_internal.get(id(tensor))\n if graph_const is None:\n graph_const = tensor._capture_as_const(name)\n if graph_const is None:\n graph_const = self._create_placeholder_helper(graph, tensor, name)\n self.add_or_replace(key=id(tensor), external=tensor, internal=graph_const, is_by_ref=False)\n graph.inputs.append(graph_const)\n graph_const._record_tape(tensor)\n return graph_const\n return self._create_placeholder_helper(graph, tensor, name)\n if tensor.graph is not graph:\n graph._validate_in_scope(tensor)\n if name is None:\n assert tensor.op is not None, (tensor.__class__, dir(tensor), tensor.__class__.__name__)\n name = tensor.op.name\n return graph._capture_helper(tensor, name)\n return tensor", + "docstring": "Captures if it's external to this graph. 
If is from a different graph, returns a placeholder for it. and the placeholder will appear in self.captures, and the placeholder will appear in self.inputs. Multiple calls to this method with the same argument will return the same placeholder. If is from this graph, returns . Args: graph: The FuncGraph that captures this tensor. tensor: Tensor. May be from this FuncGraph or a different graph. name: Optional name if a placeholder is created. Returns: Tensor from this FuncGraph. Raises: InaccessibleTensorError: if any tensors are accessed in a manner that bypasses the mechanisms required for the data dependencies to be correctly wired.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py", + "ast_data": "FunctionDef name:capture_by_value arg:self arg:graph arg:tensor arg:name arguments arg arg arg arg If Call If Compare Assign Call Call If BoolOp Compare Compare Call arguments arg arg Assign Call Call If Compare Assign Call If Compare Assign Call Call Call Call Call Return return:yes Return return:yes Call If Compare Call If Compare Compare Call Assign Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_tpu_topology", + "source_code": "def _create_tpu_topology(core_locations: List[_CoreLocation], num_tasks: int, num_devices_per_task: int) -> topology.Topology:\n assert min([l.x for l in core_locations]) == 0\n assert min([l.y for l in core_locations]) == 0\n assert min([l.z for l in core_locations]) == 0\n assert min([l.core for l in core_locations]) == 0\n x_max = max([l.x for l in core_locations])\n y_max = max([l.y for l in core_locations])\n z_max = max([l.z for l in core_locations])\n core_max = max([l.core for l in core_locations])\n mesh_shape = [x_max + 1, y_max + 1, z_max + 1, core_max + 1]\n device_coordinates = [[l.x, l.y, l.z, l.core] for l in core_locations]\n device_coordinates = numpy_compat.np_asarray(device_coordinates).reshape(num_tasks, num_devices_per_task, 4)\n return topology.Topology(mesh_shape=mesh_shape, device_coordinates=device_coordinates)", + "docstring": "Returns a Topology object build from a _CoreLocation list. Args: core_locations: A list of _CoreLocation objects sorted first by TF task ID and then by per-task device ordinals. num_tasks: The number of TF tasks in the cluster. num_devices_per_task: The number of TPU devices local to each task.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py", + "ast_data": "FunctionDef name:_create_tpu_topology arg:core_locations arg:num_tasks arg:num_devices_per_task arguments arg arg arg Compare Call Compare Call Compare Call Compare Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "LogSigmoid", + "source_code": "class LogSigmoid(Module):\n\n def forward(self, input: Tensor) -> Tensor:\n return F.logsigmoid(input)", + "docstring": "Applies the Logsigmoid function element-wise. .. math:: \\text{LogSigmoid}(x) = \\log\\left(\\frac{ 1 }{ 1 + \\exp(-x)}\\right) Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. 
image:: ../scripts/activation_images/LogSigmoid.png Examples:: >>> m = nn.LogSigmoid() >>> input = torch.randn(2) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:LogSigmoid FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_clear", + "source_code": "def _clear(self, event):\n if self.ignore(event) or self.canvas.is_saving():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._buttons)", + "docstring": "Internal event handler to clear the buttons.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:_clear arg:self arg:event arguments arg arg If BoolOp Call Call Return return:no Assign Call Call" + }, + { + "library": "tensorflow", + "name": "clone_model_on_replicas", + "source_code": "def clone_model_on_replicas(model, strategy, mode, inputs=None, targets=None):\n with backend.get_graph().as_default(), strategy.scope():\n distributed_model = strategy.extended.call_for_each_replica(_clone_and_build_model, args=(model, mode, inputs, targets))\n set_distributed_model(model, mode, distributed_model)\n if mode == ModeKeys.TRAIN:\n model._make_callback_model(distributed_model)", + "docstring": "Create a cloned model on each replica.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", + "ast_data": "FunctionDef name:clone_model_on_replicas arg:model arg:strategy arg:mode arg:inputs arg:targets arguments arg arg arg arg arg With Call Call Call Assign Call Call If Compare Call" + }, + { + "library": "tensorflow", + "name": "NewSession", + "source_code": "def NewSession(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", + "docstring": "Starts a profiling session, blocks until it completes. TPUProfileAnalysis service delegate this to TPUProfiler service. Populate the profiled data in repository, then return status to caller.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\profiler_analysis_pb2_grpc.py", + "ast_data": "FunctionDef name:NewSession arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call" + }, + { + "library": "django", + "name": "_setup", + "source_code": "def _setup(self, name=None):\n settings_module = os.environ.get(ENVIRONMENT_VARIABLE)\n if not settings_module:\n desc = 'setting %s' % name if name else 'settings'\n raise ImproperlyConfigured('Requested %s, but settings are not configured. You must either define the environment variable %s or call settings.configure() before accessing settings.' % (desc, ENVIRONMENT_VARIABLE))\n self._wrapped = Settings(settings_module)", + "docstring": "Load the settings module pointed to by the environment variable. 
This is used the first time settings are needed, if the user hasn't configured settings manually.", + "type": "method", + "file_path": "django\\django\\conf\\__init__.py", + "ast_data": "FunctionDef name:_setup arg:self arg:name arguments arg arg Assign Call If Assign Raise Call Assign Call" + }, + { + "library": "django", + "name": "get_ordering", + "source_code": "def get_ordering(self, request):\n return self.ordering or ()", + "docstring": "Hook for specifying field ordering.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_ordering arg:self arg:request arguments arg arg Return return:yes BoolOp" + }, + { + "library": "tensorflow", + "name": "flat_outputs", + "source_code": "@property\ndef flat_outputs(self) -> List[trace.TraceType]:\n if not hasattr(self, '_cached_flat_outputs'):\n if self.output is not None:\n self._cached_flat_outputs = self.output.flatten()\n return self._cached_flat_outputs", + "docstring": "Flat tensor outputs returned by this FunctionType.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", + "ast_data": "FunctionDef name:flat_outputs arg:self arguments arg If Call If Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "remove_load_call_method", + "source_code": "def remove_load_call_method(instructions: list[Instruction]) -> list[Instruction]:\n assert sys.version_info < (3, 11)\n rewrites = {'LOAD_METHOD': 'LOAD_ATTR', 'CALL_METHOD': 'CALL_FUNCTION'}\n for inst in instructions:\n if inst.opname in rewrites:\n inst.opname = rewrites[inst.opname]\n inst.opcode = dis.opmap[inst.opname]\n return instructions", + "docstring": "LOAD_METHOD puts a NULL on the stack which causes issues, so remove it", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:remove_load_call_method arg:instructions arguments arg Compare Assign For If Compare Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "get_media_prefix", + "source_code": "@register.tag\ndef get_media_prefix(parser, token):\n return PrefixNode.handle_token(parser, token, 'MEDIA_URL')", + "docstring": "Populate a template variable with the media prefix, ``. Usage:: {% get_media_prefix [as varname] %} Examples:: {% get_media_prefix %} {% get_media_prefix as media_prefix %}", + "type": "function", + "file_path": "django\\django\\templatetags\\static.py", + "ast_data": "FunctionDef name:get_media_prefix arg:parser arg:token arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_datetime_to_pdf", + "source_code": "def _datetime_to_pdf(d):\n r = d.strftime('D:%Y%m%d%H%M%S')\n z = d.utcoffset()\n if z is not None:\n z = z.seconds\n elif time.daylight:\n z = time.altzone\n else:\n z = time.timezone\n if z == 0:\n r += 'Z'\n elif z < 0:\n r += \"+%02d'%02d'\" % (-z // 3600, -z % 3600)\n else:\n r += \"-%02d'%02d'\" % (z // 3600, z % 3600)\n return r", + "docstring": "Convert a datetime to a PDF string representing it. 
Used for PDF and PGF.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:_datetime_to_pdf arg:d arguments arg Assign Call Assign Call If Compare Assign If Assign Assign If Compare If Compare Return return:yes" + }, + { + "library": "pandas", + "name": "_add_nat", + "source_code": "@final\ndef _add_nat(self) -> Self:\n if isinstance(self.dtype, PeriodDtype):\n raise TypeError(f'Cannot add {type(self).__name__} and {type(NaT).__name__}')\n result = np.empty(self.shape, dtype=np.int64)\n result.fill(iNaT)\n result = result.view(self._ndarray.dtype)\n return type(self)._simple_new(result, dtype=self.dtype, freq=None)", + "docstring": "Add pd.NaT to self", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_add_nat arg:self arguments arg If Call Raise Call Call Call Assign Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "MethodMapping", + "source_code": "class MethodMapping:\n\n def __init__(self):\n self._routes = []\n\n def __iter__(self):\n return iter(self._routes)\n\n def add(self, *, caller, callee):\n if caller not in METHODS:\n raise ValueError(f'Given caller:{caller} is not a valid method. Valid methods are: {METHODS}')\n if callee not in METHODS:\n raise ValueError(f'Given callee:{callee} is not a valid method. Valid methods are: {METHODS}')\n self._routes.append(MethodPair(caller=caller, callee=callee))\n return self\n\n def _serialize(self):\n result = list()\n for route in self._routes:\n result.append({'caller': route.caller, 'callee': route.callee})\n return result\n\n def __repr__(self):\n return str(self._serialize())\n\n def __str__(self):\n return str(repr(self))", + "docstring": "Stores the mapping between caller and callee methods for a router. This class is primarily used in a `` instances. .. versionadded:: 1.3", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", + "ast_data": "ClassDef name:MethodMapping FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:add arg:self arguments arg arg arg If Compare Raise Call If Compare Raise Call Call Call Return return:yes FunctionDef name:_serialize arg:self arguments arg Assign Call For Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "name_from_scope_name", + "source_code": "def name_from_scope_name(name) -> str:\n return name[:-1] if name and name[-1] == '/' else name", + "docstring": "Returns the name of an op given the name of its scope. Args: name: the name of the scope. 
Returns: the name of the op (equal to scope name minus any trailing slash).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:name_from_scope_name arg:name arguments arg Return return:yes BoolOp Compare" + }, + { + "library": "pytorch", + "name": "_prepare_standalone_module_fx", + "source_code": "def _prepare_standalone_module_fx(model: torch.nn.Module, qconfig_mapping: Union[QConfigMapping, dict[str, Any]], is_qat: bool, example_inputs: tuple[Any, ...], prepare_custom_config: Union[PrepareCustomConfig, dict[str, Any], None]=None, backend_config: Union[BackendConfig, dict[str, Any], None]=None) -> GraphModule:\n return _prepare_fx(model, qconfig_mapping, is_qat, example_inputs, prepare_custom_config, backend_config=backend_config, is_standalone_module=True)", + "docstring": "[Internal use only] Prepare a standalone module, so that it can be used when quantizing the parent module. standalone_module means it a submodule that is not inlined in parent module, and will be quantized separately as one unit. How the standalone module is observed is specified by and in the prepare_custom_config for the standalone module Returns: * model(GraphModule): prepared standalone module. It has these attributes in model.meta: * : a list of indexes for the graph input that is expected to be quantized, same as input_quantized_idxs configuration provided for the standalone module * : a list of indexs for the graph output that is quantized same as input_quantized_idxs configuration provided for the standalone module", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantize_fx.py", + "ast_data": "FunctionDef name:_prepare_standalone_module_fx arg:model arg:qconfig_mapping arg:is_qat arg:example_inputs arg:prepare_custom_config arg:backend_config arguments arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "clone", + "source_code": "def clone(self):\n return self.__class__(output_graph=self.output_graph_weakref(), id_to_variable=dict(self.id_to_variable), store_attr_mutations={k: dict(v) for k, v in self.store_attr_mutations.items()}, keepalive=list(self.keepalive), save_for_backward=self.save_for_backward, tensor_hooks=self.tensor_hooks)", + "docstring": "Create a shallow copy", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\side_effects.py", + "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_copy_trackable_to_cpu", + "source_code": "def _copy_trackable_to_cpu(self, object_map):\n if self in object_map:\n for v in self._vars:\n v._copy_trackable_to_cpu(object_map)\n else:\n copied_vars = []\n for v in self._vars:\n v._copy_trackable_to_cpu(object_map)\n copied_vars.append(object_map[v])\n new_var = TPUReplicatedVariable(copied_vars, name=self.name)\n object_map[self] = new_var", + "docstring": "For implementing .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py", + "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare For Call Assign For Call Call Assign Call Assign" + }, + { + "library": "pytorch", + "name": "mocked_modules", + "source_code": "def mocked_modules(self) -> list[str]:\n return self._nodes_with_action_type(_ModuleProviderAction.MOCK)", + "docstring": "Return all modules that are currently mocked. 
Returns: A list containing the names of modules which will be mocked in this package.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_exporter.py", + "ast_data": "FunctionDef name:mocked_modules arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, ax, label, initial='', *, color='.95', hovercolor='1', label_pad=0.01, textalignment='left'):\n super().__init__(ax)\n self._text_position = _api.check_getitem({'left': 0.05, 'center': 0.5, 'right': 0.95}, textalignment=textalignment)\n self.label = ax.text(-label_pad, 0.5, label, transform=ax.transAxes, verticalalignment='center', horizontalalignment='right')\n self.text_disp = self.ax.text(self._text_position, 0.5, initial, transform=self.ax.transAxes, verticalalignment='center', horizontalalignment=textalignment, parse_math=False)\n self._observers = cbook.CallbackRegistry(signals=['change', 'submit'])\n ax.set(xlim=(0, 1), ylim=(0, 1), navigate=False, facecolor=color, xticks=[], yticks=[])\n self.cursor_index = 0\n self.cursor = ax.vlines(0, 0, 0, visible=False, color='k', lw=1, transform=mpl.transforms.IdentityTransform())\n self.connect_event('button_press_event', self._click)\n self.connect_event('button_release_event', self._release)\n self.connect_event('motion_notify_event', self._motion)\n self.connect_event('key_press_event', self._keypress)\n self.connect_event('resize_event', self._resize)\n self.color = color\n self.hovercolor = hovercolor\n self.capturekeystrokes = False", + "docstring": "Parameters ---------- ax : The instance the button will be placed into. label : str Label for this text box. initial : str Initial value in the text box. color : :mpltype: The color of the box. hovercolor : :mpltype: The color of the box when the mouse is over it. label_pad : float The distance between the label and the right side of the textbox. textalignment : {'left', 'center', 'right'} The horizontal location of the text.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:ax arg:label arg:initial arguments arg arg arg arg arg arg arg arg Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Assign Call Call Call Call Call Call Call Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "enable_wrap", + "source_code": "@contextlib.contextmanager\ndef enable_wrap(*, wrapper_cls: Any, **wrapper_kwargs: Any) -> Generator[None, None, None]:\n kwargs = {'wrapper_cls': wrapper_cls, **wrapper_kwargs}\n with _ConfigAutoWrap(**kwargs):\n yield", + "docstring": "Context manager to wrap modules using a wrapper. Useful for when you'd like to apply the same configuration arguments to all child modules that you wrap. A particularly important use case is wrapping large layers so that they get sharded (in-place) during initialization, to avoid running out of system memory. Large layers can indicate that they should be sharded via the `wrapwrapFullyShardedDataParallel` instances inside the context", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py", + "ast_data": "FunctionDef name:enable_wrap arguments arg arg Assign With Call" + }, + { + "library": "pytorch", + "name": "template_dir_for_comments", + "source_code": "def template_dir_for_comments(self) -> str:\n return os.path.relpath(self.template_dir, os.path.dirname(__file__))", + "docstring": "This needs to be deterministic. 
The template dir is an absolute path that varies across builds. So, just use the path relative to this file, which will point to the codegen source but will be stable.", + "type": "method", + "file_path": "pytorch\\torchgen\\utils.py", + "ast_data": "FunctionDef name:template_dir_for_comments arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "sepia_from_rgb", + "source_code": "def sepia_from_rgb(input: Tensor, rescale: bool=True, eps: float=1e-06) -> Tensor:\n if len(input.shape) < 3 or input.shape[-3] != 3:\n raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {input.shape}')\n r = input[..., 0, :, :]\n g = input[..., 1, :, :]\n b = input[..., 2, :, :]\n r_out = 0.393 * r + 0.769 * g + 0.189 * b\n g_out = 0.349 * r + 0.686 * g + 0.168 * b\n b_out = 0.272 * r + 0.534 * g + 0.131 * b\n sepia_out = torch.stack([r_out, g_out, b_out], dim=-3)\n if rescale:\n max_values = sepia_out.amax(dim=-1).amax(dim=-1)\n sepia_out = sepia_out / (max_values[..., None, None] + eps)\n return sepia_out", + "docstring": "Apply to a tensor the sepia filter. Args: input: the input tensor with shape of :math:. rescale: If True, the output tensor will be rescaled (max values be 1. or 255). eps: scalar to enforce numerical stability. Returns: Tensor: The sepia tensor of same size and numbers of channels as the input with shape :math:. Example: >>> input = torch.ones(3, 1, 1) >>> sepia_from_rgb(input, rescale=False) tensor([[[1.3510]], [[1.2030]], [[0.9370]]])", + "type": "function", + "file_path": "kornia\\kornia\\color\\sepia.py", + "ast_data": "FunctionDef name:sepia_from_rgb arg:input arg:rescale arg:eps arguments arg arg arg If BoolOp Compare Call Compare Raise Call Assign Assign Assign Assign Assign Assign Assign Call If Assign Call Call Assign Return return:yes" + }, + { + "library": "django", + "name": "get_list_display", + "source_code": "def get_list_display(self, request):\n return self.list_display", + "docstring": "Return a sequence containing the fields to be displayed on the changelist.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_list_display arg:self arg:request arguments arg arg Return return:yes" + }, + { + "library": "django", + "name": "srs", + "source_code": "@srs.setter\ndef srs(self, value):\n if isinstance(value, SpatialReference):\n srs = value\n elif isinstance(value, (int, str)):\n srs = SpatialReference(value)\n else:\n raise ValueError('Could not create a SpatialReference from input.')\n capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())\n self._flush()", + "docstring": "Set the spatial reference used in this GDALRaster. 
The input can be a SpatialReference or any parameter accepted by the SpatialReference constructor.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py", + "ast_data": "FunctionDef name:srs arg:self arg:value arguments arg arg If Call Assign If Call Assign Call Raise Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "formatter", + "source_code": "@property\ndef formatter(self):\n return self.long_axis.get_major_formatter()", + "docstring": "Major tick label for the colorbar.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", + "ast_data": "FunctionDef name:formatter arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "def predict_proba(self, X):\n return super().predict_proba(X)", + "docstring": "Return posterior probabilities of classification. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples/test vectors. Returns ------- C : ndarray of shape (n_samples, n_classes) Posterior probabilities of classification per class.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_generate_unsampled_indices", + "source_code": "def _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):\n sample_indices = _generate_sample_indices(random_state, n_samples, n_samples_bootstrap)\n sample_counts = np.bincount(sample_indices, minlength=n_samples)\n unsampled_mask = sample_counts == 0\n indices_range = np.arange(n_samples)\n unsampled_indices = indices_range[unsampled_mask]\n return unsampled_indices", + "docstring": "Private function used to forest._set_oob_score function.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py", + "ast_data": "FunctionDef name:_generate_unsampled_indices arg:random_state arg:n_samples arg:n_samples_bootstrap arguments arg arg arg Assign Call Assign Call Assign Compare Assign Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "tmin", + "source_code": "@xp_capabilities()\n@_axis_nan_policy_factory(lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,))\ndef tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):\n xp = array_namespace(a)\n max_ = xp.iinfo(a.dtype).max if xp.isdtype(a.dtype, 'integral') else xp.inf\n a, mask = _put_val_to_limits(a, (lowerlimit, None), (inclusive, None), val=max_, xp=xp)\n res = xp.min(a, axis=axis)\n invalid = xp.all(mask, axis=axis)\n if is_lazy_array(invalid) or xp.any(invalid):\n res = xp_promote(res, force_floating=True, xp=xp)\n res = xp.where(invalid, xp.nan, res)\n return res[()] if res.ndim == 0 else res", + "docstring": "Compute the trimmed minimum. This function finds the minimum value of an array along the specified axis, but only considering values greater than a specified lower limit. Parameters ---------- a : array_like Array of values. lowerlimit : None or float, optional Values in the input array less than the given limit will be ignored. When lowerlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array . inclusive : {True, False}, optional This flag determines whether values exactly equal to the lower limit are included. 
The default value is True. Returns ------- tmin : float, int or ndarray Trimmed minimum. Examples -------- >>> import numpy as np >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmin(x) 0 >>> stats.tmin(x, 13) 13 >>> stats.tmin(x, 13, inclusive=False) 14", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:tmin arg:a arg:lowerlimit arg:axis arg:inclusive arg:nan_policy arguments arg arg arg arg arg Assign Call Assign Call Call Assign Call Assign Call Assign Call If BoolOp Call Call Assign Call Assign Call Return return:yes Compare Call Call arguments arg arguments arg arg" + }, + { + "library": "pytorch", + "name": "InconsistentMetadata", + "source_code": "class InconsistentMetadata(Exception):\n pass", + "docstring": "Exception that is thrown when AutoHeuristic tries to log data to a file where the metadata stored in the file does not match the metadata it would store if the file didn't exist.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic.py", + "ast_data": "ClassDef name:InconsistentMetadata" + }, + { + "library": "pytorch", + "name": "_vmap_for_bhqkv", + "source_code": "def _vmap_for_bhqkv(fn: Callable, prefix: tuple[Optional[int], ...], suffix: tuple[Optional[int], ...]=(), out_dims: Union[int, list[Optional[int]]]=0, group_dim: bool=False):\n dimensions: list[tuple[None | int, None | int, None | int, None | int]] = []\n dimensions = [(None, None, None, 0), (None, None, 0, None), (None, 0, None, None)]\n if group_dim:\n dimensions += [(None, 0, None, None)]\n dimensions += [(0, None, None, None)]\n for dims in dimensions:\n fn = torch.vmap(fn, in_dims=prefix + dims + suffix, out_dims=out_dims)\n return fn", + "docstring": "Used to vmap both score_mods and mask_mods over 4-dimensional/5-dimension inputs. Mapping over the [b, hq, q_idx, kv_idx] or [b, hkv, g, q_idx, kv_idx] dimensions. Args: fn (callable): The function to vmap. prefix (tuple): The prefix of the vmap. For score mod functions, this should be set to (0,). For mask_mods = () suffix (tuple): We need to add (0,) if gradOut is being mapped over, and (None,) * len(other_buffers). out_dims (tuple): For forward cases, keep this as the default 0 since we are only returning 1 output. For backwards, the joint graph returns grads for B, H, Q_idx, KV_idx and other_buffers, so we set this to (0, None, None, None, None) + (None,) * len(other_buffers). 
Returns: callable: The vmapped function.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py", + "ast_data": "FunctionDef name:_vmap_for_bhqkv arg:fn arg:prefix arg:suffix arg:out_dims arg:group_dim arguments arg arg arg arg arg Assign If For Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "conv2d_inference_rule", + "source_code": "@register_inference_rule(Conv2d)\ndef conv2d_inference_rule(n: Node, module_instance):\n assert isinstance(n.args[0], Node)\n n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)\n arg_type = n.args[0].type\n curr_node_type = expand_to_tensor_dim(n.type, 4)\n if is_consistent(arg_type.__args__[1], module_instance.in_channels):\n w_in = arg_type.__args__[3]\n h_in = arg_type.__args__[2]\n h_out = calculate_out_dimension(h_in, module_instance, 0)\n w_out = calculate_out_dimension(w_in, module_instance, 1)\n new_type = TensorType((arg_type.__args__[0], module_instance.out_channels, h_out, w_out))\n gub = get_greatest_upper_bound(new_type, curr_node_type)\n n.type = gub\n return n.type\n else:\n raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')", + "docstring": "Given a Conv2D instance and a node check the following conditions: - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, H, W) - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4') - x_2 is consistent with the module's in_channels - let o = (x_1, out_channels, H_out, W_out) then the output is the greatest upper bound of o and the existing node type t'.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", + "ast_data": "FunctionDef name:conv2d_inference_rule arg:n arg:module_instance arguments arg arg Call Assign Call Assign Assign Call If Call Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Return return:yes Raise Call Call" + }, + { + "library": "kornia", + "name": "shear", + "source_code": "def shear(tensor: Tensor, shear: Tensor, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=False) -> Tensor:\n if not isinstance(tensor, Tensor):\n raise TypeError(f'Input tensor type is not a Tensor. Got {type(tensor)}')\n if not isinstance(shear, Tensor):\n raise TypeError(f'Input shear type is not a Tensor. Got {type(shear)}')\n if len(tensor.shape) not in (3, 4):\n raise ValueError(f'Invalid tensor shape, we expect CxHxW or BxCxHxW. Got: {tensor.shape}')\n shear_matrix: Tensor = _compute_shear_matrix(shear)\n return affine(tensor, shear_matrix[..., :2, :3], mode, padding_mode, align_corners)", + "docstring": "Shear the tensor. .. image:: _static/img/shear.png Args: tensor: The image tensor to be skewed with shape of :math:. shear: tensor containing the angle to shear in the x and y direction. The tensor must have a shape of (B, 2), where B is batch size, last dimension contains shx shy. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The skewed tensor with shape same as the input. 
Example: >>> img = torch.rand(1, 3, 4, 4) >>> shear_factor = torch.tensor([[0.5, 0.0]]) >>> out = shear(img, shear_factor) >>> print(out.shape) torch.Size([1, 3, 4, 4])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py", + "ast_data": "FunctionDef name:shear arg:tensor arg:shear arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg If Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_run_conversion", + "source_code": "def _run_conversion(self):\n grappler_session_config = config_pb2.ConfigProto()\n custom_rewriter_config = _get_tensorrt_rewriter_config(conversion_params=self._conversion_params, is_dynamic_op=self._is_dynamic_op, max_batch_size=self._max_batch_size, disable_non_trt_optimizers=self._test_only_disable_non_trt_optimizers, use_implicit_batch=True)\n grappler_session_config.graph_options.rewrite_options.CopyFrom(custom_rewriter_config)\n self._converted_graph_def = tf_optimizer.OptimizeGraph(grappler_session_config, self._grappler_meta_graph_def, graph_id=b'tf_graph')\n self._converted = True", + "docstring": "Run Grappler's OptimizeGraph() tool to convert the graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py", + "ast_data": "FunctionDef name:_run_conversion arg:self arguments arg Assign Call Assign Call Call Assign Call Assign" + }, + { + "library": "pytorch", + "name": "FakeQuantizeBase", + "source_code": "class FakeQuantizeBase(ABC, Module):\n fake_quant_enabled: torch.Tensor\n observer_enabled: torch.Tensor\n\n def __init__(self) -> None:\n super().__init__()\n self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))\n self.register_buffer('observer_enabled', torch.tensor([1], dtype=torch.uint8))\n\n @abstractmethod\n def forward(self, x):\n pass\n\n @abstractmethod\n def calculate_qparams(self, **kwargs):\n pass\n\n @torch.jit.export\n def enable_fake_quant(self, enabled: bool=True) -> None:\n self.fake_quant_enabled[0] = 1 if enabled else 0\n\n @torch.jit.export\n def disable_fake_quant(self):\n self.enable_fake_quant(False)\n\n @torch.jit.export\n def enable_observer(self, enabled: bool=True) -> None:\n self.observer_enabled[0] = 1 if enabled else 0\n\n @torch.jit.export\n def disable_observer(self):\n self.enable_observer(False)\n\n @classmethod\n def with_args(cls, **kwargs):\n fake_quant_constructor = _with_args(cls, **kwargs)\n fake_quant_constructor.__module__ = 'torch.ao.quantization.fake_quantize'\n return fake_quant_constructor", + "docstring": "Base fake quantize module. Base fake quantize module Any fake quantize implementation should derive from this class. Concrete fake quantize module should follow the same API. In forward, they will update the statistics of the observed Tensor and fake quantize the input. 
They should also provide a function that computes the quantization parameters given the collected statistics.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py", + "ast_data": "ClassDef name:FakeQuantizeBase FunctionDef name:__init__ arg:self arguments arg Call Call Call Call Call Call FunctionDef name:forward arg:self arg:x arguments arg arg FunctionDef name:calculate_qparams arg:self arguments arg arg FunctionDef name:enable_fake_quant arg:self arg:enabled arguments arg arg Assign FunctionDef name:disable_fake_quant arg:self arguments arg Call FunctionDef name:enable_observer arg:self arg:enabled arguments arg arg Assign FunctionDef name:disable_observer arg:self arguments arg Call FunctionDef name:with_args arg:cls arguments arg arg Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_current_name_scope", + "source_code": "@tf_export('get_current_name_scope', v1=[])\ndef get_current_name_scope() -> str:\n ctx = context.context()\n if ctx.executing_eagerly():\n return ctx.scope_name.rstrip('/')\n else:\n return get_default_graph().get_name_scope()", + "docstring": "Returns current full name scope specified by s. For example, In other words, returns the op name prefix that will be prepended to, if an op is created at that place. Note that resets the name scope stack as shown below.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:get_current_name_scope arguments Assign Call If Call Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "make_indexer", + "source_code": "def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:\n\n def indexer(index):\n assert len(index) == len(self.stride)\n assert len(index) == len(self.size)\n result = self.offset\n for idx, stride, sz in zip(index, self.stride, self.size):\n if sz != 1:\n result = result + idx * stride\n return result\n return indexer", + "docstring": "A closure containing math to read a given element", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:make_indexer arg:self arguments arg FunctionDef name:indexer arg:index arguments arg Compare Call Call Compare Call Call Assign For Call If Compare Assign Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "can_fuse_horizontal", + "source_code": "@staticmethod\ndef can_fuse_horizontal(scheduler: Scheduler, node1: BaseSchedulerNode, node2: BaseSchedulerNode, shared_data_score: int) -> bool:\n if shared_data_score < config.score_fusion_memory_threshold:\n WhyNoFuse(node1, node2)('score_fusion_memory_threshold')\n return False\n if scheduler.are_long_distant_nodes(node1, node2):\n WhyNoFuse(node1, node2)('Nodes are too far away. 
Fusing them may increase peak memory.')\n return False\n return True", + "docstring": "Hook for heuristics to prevent horizontal (consumer/consumer) fusions", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\choices.py", + "ast_data": "FunctionDef name:can_fuse_horizontal arg:scheduler arg:node1 arg:node2 arg:shared_data_score arguments arg arg arg arg If Compare Call Call Return return:yes If Call Call Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_split_by_task", + "source_code": "def _split_by_task(devices, values):\n num_devices = len(devices)\n if num_devices != len(values):\n raise ValueError('len(devices) must equal len(values)')\n per_task_devices = collections.OrderedDict()\n per_task_values = collections.OrderedDict()\n for d in range(num_devices):\n d_spec = device_lib.DeviceSpec.from_string(devices[d])\n if not hasattr(d_spec, 'task') or d_spec.task is None:\n assert False, 'failed to parse device %s' % devices[d]\n index = (d_spec.job or 'localhost', d_spec.replica or 0, d_spec.task)\n if index not in per_task_devices:\n per_task_devices[index] = []\n per_task_values[index] = []\n per_task_devices[index].append(devices[d])\n per_task_values[index].append(values[d])\n return (list(per_task_devices.values()), list(per_task_values.values()))", + "docstring": "Partition devices and values by common task. Args: devices: list of device name strings values: list of of same length as devices. Returns: (per_task_devices, per_task_values) where both values are lists of lists with isomorphic structure: the outer list is indexed by task, and the inner list has length of the number of values belonging to that task. per_task_devices contains the specific devices to which the values are local, and per_task_values contains the corresponding values. Raises: ValueError: devices must be same length as values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:_split_by_task arg:devices arg:values arguments arg arg Assign Call If Compare Call Raise Call Assign Call Assign Call For Call Assign Call If BoolOp Call Compare Assign BoolOp BoolOp If Compare Assign Assign Call Call Return return:yes Call Call Call Call" + }, + { + "library": "scipy", + "name": "black_tophat", + "source_code": "def black_tophat(input, size=None, footprint=None, structure=None, output=None, mode='reflect', cval=0.0, origin=0, *, axes=None):\n input = np.asarray(input)\n if size is not None and footprint is not None:\n warnings.warn('ignoring size because footprint is set', UserWarning, stacklevel=2)\n tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, origin, axes=axes)\n tmp = grey_erosion(tmp, size, footprint, structure, output, mode, cval, origin, axes=axes)\n if tmp is None:\n tmp = output\n if input.dtype == np.bool_ and tmp.dtype == np.bool_:\n np.bitwise_xor(tmp, input, out=tmp)\n else:\n np.subtract(tmp, input, out=tmp)\n return tmp", + "docstring": "Multidimensional black tophat filter. Parameters ---------- input : array_like Input. size : tuple of ints, optional Shape of a flat and full structuring element used for the filter. Optional if or is provided. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the black tophat filter. structure : array of ints, optional Structuring element used for the filter. may be a non-flat structuring element. 
The array applies offsets to the pixels in a neighborhood (the offset is additive during dilation and subtractive during erosion) output : array, optional An array used for storing the output of the filter may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The parameter determines how the array borders are handled, where is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if is 'constant'. Default is 0.0. origin : scalar, optional The parameter controls the placement of the filter. Default 0 axes : tuple of int or None The axes over which to apply the filter. If None, is filtered along all axes. If an tuple is provided, its length must match the number of axes. Returns ------- black_tophat : ndarray Result of the filter of with . See Also -------- white_tophat, grey_opening, grey_closing Examples -------- Change dark peak to bright peak and subtract background. >>> from scipy.ndimage import generate_binary_structure, black_tophat >>> import numpy as np >>> square = generate_binary_structure(rank=2, connectivity=3) >>> dark_on_gray = np.array([[7, 6, 6, 6, 7], ... [6, 5, 4, 5, 6], ... [6, 4, 0, 4, 6], ... [6, 5, 4, 5, 6], ... [7, 6, 6, 6, 7]]) >>> black_tophat(input=dark_on_gray, structure=square) array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 5, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]])", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_morphology.py", + "ast_data": "FunctionDef name:black_tophat arg:input arg:size arg:footprint arg:structure arg:output arg:mode arg:cval arg:origin arguments arg arg arg arg arg arg arg arg arg Assign Call If BoolOp Compare Compare Call Assign Call Assign Call If Compare Assign If BoolOp Compare Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "benchmark_map_and_filter_fusion", + "source_code": "def benchmark_map_and_filter_fusion(self):\n chain_lengths = [0, 1, 2, 5, 10, 20, 50]\n for chain_length in chain_lengths:\n self._benchmark_map_and_filter_fusion(chain_length=chain_length, optimize_dataset=False)\n self._benchmark_map_and_filter_fusion(chain_length=chain_length, optimize_dataset=True)", + "docstring": "Evaluates performance map of fusion.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\benchmarks\\optimize_benchmark.py", + "ast_data": "FunctionDef name:benchmark_map_and_filter_fusion arg:self arguments arg Assign For Call Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None, **params):\n self._checked_cv_orig = check_cv(self.cv, y, classifier=is_classifier(self.estimator))\n routed_params = self._get_routed_params_for_fit(params)\n self._check_input_parameters(X=X, y=y, split_params=routed_params.splitter.split)\n self._n_samples_orig = _num_samples(X)\n super().fit(X, y=y, **params)\n self.best_score_ = self.cv_results_['mean_test_score'][self.best_index_]\n return self", + "docstring": "Run fit with all sets of parameters. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_output), optional Target relative to X for classification or regression; None for unsupervised learning. **params : dict of string -> object Parameters passed to the `` method of the estimator. 
Returns ------- self : object Instance of fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_search_successive_halving.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Call Assign Return return:yes Call" + }, + { + "library": "django", + "name": "_get_elided_page_range", + "source_code": "def _get_elided_page_range(self, number, num_pages, page_range, on_each_side=3, on_ends=2):\n if num_pages <= (on_each_side + on_ends) * 2:\n for page in page_range:\n yield page\n return\n if number > 1 + on_each_side + on_ends + 1:\n for page in range(1, on_ends + 1):\n yield page\n yield self.ELLIPSIS\n for page in range(number - on_each_side, number + 1):\n yield page\n else:\n for page in range(1, number + 1):\n yield page\n if number < num_pages - on_each_side - on_ends - 1:\n for page in range(number + 1, number + on_each_side + 1):\n yield page\n yield self.ELLIPSIS\n for page in range(num_pages - on_ends + 1, num_pages + 1):\n yield page\n else:\n for page in range(number + 1, num_pages + 1):\n yield page", + "docstring": "Return a 1-based range of pages with some values elided. If the page range is larger than a given size, the whole range is not provided and a compact form is returned instead, e.g. for a paginator with 50 pages, if page 43 were the current page, the output, with the default arguments, would be: 1, 2, …, 40, 41, 42, 43, 44, 45, 46, …, 49, 50.", + "type": "method", + "file_path": "django\\django\\core\\paginator.py", + "ast_data": "FunctionDef name:_get_elided_page_range arg:self arg:number arg:num_pages arg:page_range arg:on_each_side arg:on_ends arguments arg arg arg arg arg arg If Compare For Return return:no If Compare For Call For Call For Call If Compare For Call For Call For Call" + }, + { + "library": "pytorch", + "name": "EqualizationQConfig", + "source_code": "class EqualizationQConfig(namedtuple('EqualizationQConfig', ['input_activation', 'weight'])):\n __slots__ = ()\n\n def __new__(cls, input_activation=torch.nn.Identity, weight=torch.nn.Identity):\n if isinstance(input_activation, nn.Module) or isinstance(weight, nn.Module):\n raise ValueError('EqualizationQConfig received observer instance, please pass observer class instead. ' + 'Use MyObserver.with_args(x=1) to override arguments to constructor if needed')\n self = super().__new__(cls, input_activation, weight)\n return self", + "docstring": "Describes how to quantize a layer or a part of the network specifically for input-weight equalization by providing settings (observer classes) for inputs, outputs, and weights. Note that EqualizationQConfig needs to contain observer **classes** (like MinMaxObserver) or a callable that returns instances on invocation, not the concrete observer instances themselves. Quantization function will instantiate observers multiple times for each of the layers. 
Observer classes have usually reasonable default arguments, but they can be overwritten with method (that behaves like functools.partial): my_qconfig = EqualizationQConfig(input_activation=_InputEqualizationObserver.with_args(dtype=torch.qint8), weight=_WeightEqualizationObserver.with_args(dtype=torch.qint8))", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "ClassDef name:EqualizationQConfig Call Assign FunctionDef name:__new__ arg:cls arg:input_activation arg:weight arguments arg arg arg If BoolOp Call Call Raise Call Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "any", + "source_code": "def any(self, *, skipna: bool=True, **kwargs) -> bool | NAType:\n return self._reduce('any', skipna=skipna, **kwargs)", + "docstring": "Return whether any element is truthy. Returns False unless there is at least one element that is truthy. By default, NAs are skipped. If `Kleene logic skipnaskipnapandas.NA` is True or False influences the result): >>> pd.array([True, False, pd.NA], dtype=\"boolean[pyarrow]\").any(skipna=False) True >>> pd.array([1, 0, pd.NA], dtype=\"boolean[pyarrow]\").any(skipna=False) True >>> pd.array([False, False, pd.NA], dtype=\"boolean[pyarrow]\").any(skipna=False) >>> pd.array([0, 0, pd.NA], dtype=\"boolean[pyarrow]\").any(skipna=False)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:any arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_process_stmt", + "source_code": "def _process_stmt(self, node: ast.stmt) -> None:\n if isinstance(node, (ast.ClassDef, ast.FunctionDef)):\n self._process_def(node)\n elif isinstance(node, ast.Assign):\n self._process_assign(node)\n elif isinstance(node, ast.Expr):\n self._process_expr(node)\n else:\n self.visit(node)", + "docstring": "Process top-level statement for exported apis.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py", + "ast_data": "FunctionDef name:_process_stmt arg:self arg:node arguments arg arg If Call Call If Call Call If Call Call Call" + }, + { + "library": "scikit-learn", + "name": "param_default_value", + "source_code": "def param_default_value(p):\n return p.name != 'self' and p.kind != p.VAR_KEYWORD and (p.kind != p.VAR_POSITIONAL) and (p.default != p.empty)", + "docstring": "Identify hyper parameters of an estimator.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py", + "ast_data": "FunctionDef name:param_default_value arg:p arguments arg Return return:yes BoolOp Compare Compare Compare Compare" + }, + { + "library": "pandas", + "name": "nbytes", + "source_code": "@cache_readonly\ndef nbytes(self) -> int:\n return self._nbytes(False)", + "docstring": "return the number of bytes in the underlying data", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:nbytes arg:self arguments arg Return return:yes Call" + }, + { + "library": "pygame", + "name": "update", + "source_code": "def update(self, *args, **kwargs):\n for sprite in self.sprites():\n sprite.update(*args, **kwargs)", + "docstring": "call the update method of every member sprite Group.update(*args, **kwargs): return None Calls the update method of every member sprite. 
All arguments that were passed to this method are passed to the Sprite update function.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:update arg:self arguments arg arg arg For Call Call" + }, + { + "library": "numpy", + "name": "setuptools_run", + "source_code": "def setuptools_run(self):\n from distutils.command.install import install as distutils_install\n if self.old_and_unmanageable or self.single_version_externally_managed:\n return distutils_install.run(self)\n caller = sys._getframe(3)\n caller_module = caller.f_globals.get('__name__', '')\n caller_name = caller.f_code.co_name\n if caller_module != 'distutils.dist' or caller_name != 'run_commands':\n distutils_install.run(self)\n else:\n self.do_egg_install()", + "docstring": "The setuptools version of the .run() method. We must pull in the entire code so we can override the level used in the _getframe() call since we wrap this call by one more level.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\command\\install.py", + "ast_data": "FunctionDef name:setuptools_run arg:self arguments arg If BoolOp Return return:yes Call Assign Call Assign Call Assign If BoolOp Compare Compare Call Call" + }, + { + "library": "pytorch", + "name": "_is_autocast_sub_mod", + "source_code": "def _is_autocast_sub_mod(node: torch.fx.Node) -> bool:\n if node.op == 'call_module':\n assert isinstance(node.target, str)\n subgm = getattr(node.graph.owning_module, node.target)\n first_non_ph = nodes_first(subgm.graph.nodes, lambda node: node.op != 'placeholder')\n if first_non_ph and first_non_ph.op == 'call_function' and (first_non_ph.target == torch.amp.autocast_mode._enter_autocast):\n return True\n return False", + "docstring": "Check if the first non-placeholder node is .", + "type": "function", + "file_path": "pytorch\\torch\\_export\\passes\\replace_autocast_with_hop_pass.py", + "ast_data": "FunctionDef name:_is_autocast_sub_mod arg:node arguments arg If Compare Call Assign Call Assign Call arguments arg Compare If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "where", + "source_code": "@final\ndef where(self, cond, other=None) -> Index:\n if isinstance(self, ABCMultiIndex):\n raise NotImplementedError('.where is not supported for MultiIndex operations')\n cond = np.asarray(cond, dtype=bool)\n return self.putmask(~cond, other)", + "docstring": "Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. 
Examples -------- >>> idx = pd.Index([\"car\", \"bike\", \"train\", \"tractor\"]) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin([\"car\", \"train\"]), \"other\") Index(['car', 'other', 'train', 'other'], dtype='object')", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:where arg:self arg:cond arg:other arguments arg arg arg If Call Raise Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "all_reduce_coalesced", + "source_code": "@_exception_logger\n@deprecated('`torch.distributed.all_reduce_coalesced` will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/main/distributed.html#collective-functions', category=FutureWarning)\ndef all_reduce_coalesced(tensors, op=ReduceOp.SUM, group=None, async_op=False):\n if isinstance(tensors, torch.Tensor):\n tensors = [tensors]\n _check_tensor_list(tensors, 'tensor')\n _ensure_all_tensors_same_dtype(tensors)\n if _rank_not_in_group(group):\n _warn_not_in_group('all_reduce_coalesced')\n return\n if any((t.is_complex() for t in tensors)) and (not supports_complex(op)):\n raise ValueError(f'all_reduce does not support {op} on complex tensors')\n tensors = [t if not t.is_complex() else torch.view_as_real(t) for t in tensors]\n opts = AllreduceCoalescedOptions()\n opts.reduceOp = op\n opts.asyncOp = async_op\n group = group or _get_default_group()\n work = group.allreduce_coalesced(tensors, opts)\n if async_op:\n return work.get_future()\n elif work is not None:\n work.wait()", + "docstring": "WARNING: at this time individual shape checking is not implemented across nodes. For example, if the rank 0 node passes [torch.rand(4), torch.rand(2)] and the rank 1 node passes [torch.rand(2), torch.rand(2), torch.rand(2)], the allreduce operation will proceed without complaint and return erroneous outputs. This lack of shape checking results in significant performance improvements but users of this function should take extra care to ensure that each node passes in tensors whose shapes match across nodes. Reduces each tensor in tensors (residing on the same device) across all machines in such a way that all get the final result. After the call each tensor in tensors is going to bitwise identical in all processes. Complex tensors are supported. Args: tensors (Union[List[Tensor], Tensor]): Input and output of the collective. The function operates in-place. op (Optional[ReduceOp]): One of the values from `` enum. Specifies an operation used for element-wise reductions. group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. async_op (Optional[bool]): Whether this op should be an async op. Returns: Async work handle, if async_op is set to True. 
None, if not async_op or if not part of the group.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:all_reduce_coalesced arg:tensors arg:op arg:group arg:async_op arguments arg arg arg arg If Call Assign Call Call If Call Call Return return:no If BoolOp Call Call Call Raise Call Assign Call Call Assign Call Assign Assign Assign BoolOp Call Assign Call If Return return:yes Call If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "RestoredResource", + "source_code": "class RestoredResource(TrackableResource):\n\n def __init__(self, device=''):\n super().__init__(device=device)\n\n @classmethod\n def _deserialize_from_proto(cls, object_proto, dependencies, **unused_kwargs):\n obj = cls(device=object_proto.resource.device)\n resource_creator = dependencies.get('_create_resource')\n if resource_creator is not None:\n obj._create_resource = resource_creator\n return obj\n\n def _add_trackable_child(self, name, value):\n setattr(self, name, value)\n if isinstance(value, base.Trackable) and (not isinstance(value, def_function.Function)):\n self._track_trackable(value, name)", + "docstring": "Restored SavedResource.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py", + "ast_data": "ClassDef name:RestoredResource FunctionDef name:__init__ arg:self arg:device arguments arg arg Call Call FunctionDef name:_deserialize_from_proto arg:cls arg:object_proto arg:dependencies arguments arg arg arg arg Assign Call Assign Call If Compare Assign Return return:yes FunctionDef name:_add_trackable_child arg:self arg:name arg:value arguments arg arg arg Call If BoolOp Call Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename='model.ckpt', scaffold=None, listeners=None, save_graph_def=True):\n logging.info('Create CheckpointSaverHook.')\n if saver is not None and scaffold is not None:\n raise ValueError('You cannot provide both saver and scaffold.')\n self._saver = saver\n self._checkpoint_dir = checkpoint_dir\n self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n self._scaffold = scaffold\n self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)\n self._listeners = listeners or []\n self._steps_per_run = 1000000\n self._save_graph_def = save_graph_def", + "docstring": "Initializes a . Args: checkpoint_dir: , base directory for the checkpoint files. save_secs: , save every N secs. save_steps: , save every N steps. saver: object, used for saving. checkpoint_basename: , base name for the checkpoint files. scaffold: , use to get saver object. listeners: List of subclass instances. Used for callbacks that run immediately before or after this hook saves the checkpoint. save_graph_def: Whether to save the GraphDef and MetaGraphDef to . The GraphDef is saved after the session is created as . MetaGraphDefs are saved out for every checkpoint as . Raises: ValueError: One of or should be set. 
ValueError: At most one of or should be set.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:checkpoint_dir arg:save_secs arg:save_steps arg:saver arg:checkpoint_basename arg:scaffold arg:listeners arg:save_graph_def arguments arg arg arg arg arg arg arg arg arg Call If BoolOp Compare Compare Raise Call Assign Assign Assign Call Assign Assign Call Assign BoolOp Assign Assign" + }, + { + "library": "pytorch", + "name": "_get_pg_config", + "source_code": "def _get_pg_config(group: Optional[ProcessGroup]=None) -> dict[str, Any]:\n pg = group or _get_default_group()\n return {'pg_name': _get_process_group_name(pg), 'pg_desc': pg.group_desc, 'backend_config': get_backend_config(pg), 'pg_size': _get_group_size(pg), 'ranks': get_process_group_ranks(pg)}", + "docstring": "Return the pg configuration of the given process group.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:_get_pg_config arg:group arguments arg Assign BoolOp Call Return return:yes Call Call Call Call" + }, + { + "library": "pandas", + "name": "swaplevel", + "source_code": "def swaplevel(self, i=-2, j=-1) -> MultiIndex:\n new_levels = list(self.levels)\n new_codes = list(self.codes)\n new_names = list(self.names)\n i = self._get_level_number(i)\n j = self._get_level_number(j)\n new_levels[i], new_levels[j] = (new_levels[j], new_levels[i])\n new_codes[i], new_codes[j] = (new_codes[j], new_codes[i])\n new_names[i], new_names[j] = (new_names[j], new_names[i])\n return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False)", + "docstring": "Swap level i with level j. Calling this method does not change the ordering of the values. Parameters ---------- i : int, str, default -2 First level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. j : int, str, default -1 Second level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. Returns ------- MultiIndex A new MultiIndex. See Also -------- Series.swaplevel : Swap levels i and j in a MultiIndex. DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a particular axis. Examples -------- >>> mi = pd.MultiIndex( ... levels=[[\"a\", \"b\"], [\"bb\", \"aa\"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]] ... 
) >>> mi MultiIndex([('a', 'bb'), ('a', 'aa'), ('b', 'bb'), ('b', 'aa')], ) >>> mi.swaplevel(0, 1) MultiIndex([('bb', 'a'), ('aa', 'a'), ('bb', 'b'), ('aa', 'b')], )", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:swaplevel arg:self arg:i arg:j arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes Call" + }, + { + "library": "pandas", + "name": "_concat_homogeneous_fastpath", + "source_code": "def _concat_homogeneous_fastpath(mgrs_indexers, shape: Shape, first_dtype: np.dtype) -> Block:\n if all((not indexers for _, indexers in mgrs_indexers)):\n arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers]\n arr = np.concatenate(arrs).T\n bp = libinternals.BlockPlacement(slice(shape[0]))\n nb = new_block_2d(arr, bp)\n return nb\n arr = np.empty(shape, dtype=first_dtype)\n if first_dtype == np.float64:\n take_func = libalgos.take_2d_axis0_float64_float64\n else:\n take_func = libalgos.take_2d_axis0_float32_float32\n start = 0\n for mgr, indexers in mgrs_indexers:\n mgr_len = mgr.shape[1]\n end = start + mgr_len\n if 0 in indexers:\n take_func(mgr.blocks[0].values, indexers[0], arr[:, start:end])\n else:\n arr[:, start:end] = mgr.blocks[0].values\n start += mgr_len\n bp = libinternals.BlockPlacement(slice(shape[0]))\n nb = new_block_2d(arr, bp)\n return nb", + "docstring": "With single-Block managers with homogeneous dtypes (that can already hold nan), we avoid [...]", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\concat.py", + "ast_data": "FunctionDef name:_concat_homogeneous_fastpath arg:mgrs_indexers arg:shape arg:first_dtype arguments arg arg arg If Call Assign Assign Call Assign Call Call Assign Call Return return:yes Assign Call If Compare Assign Assign Assign For Assign Assign If Compare Call Assign Assign Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_node_target", + "source_code": "@compatibility(is_backward_compatible=False)\ndef get_node_target(submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node) -> str:\n assert node.op in CALLABLE_NODE_OPS, 'Expect op types of ' + ', '.join(CALLABLE_NODE_OPS) + f', but found {node.op}'\n if node.op == 'call_module':\n assert isinstance(node.target, str)\n submod = submodules[node.target]\n submod_type = getattr(submod, '_base_class_origin', type(submod))\n return get_acc_ops_name(submod_type)\n elif node.op == 'call_function':\n target: Any = node.target\n return f'acc_ops.{target.__name__}' if target.__module__ is not None and 'acc_ops' in target.__module__ else _get_qualified_name(target)\n else:\n assert isinstance(node.target, str)\n return node.target", + "docstring": "Given a returns its target typename. For \"call_method\" node, return node.target which is the name of that method being called. This could potential lead to conflict but should be okay because normally it's on a tensor. For \"call_function\" node, return typename of node.target. For \"call_module\" node, return typename of the module that node.target point to. If seeing \"_VariableFunctionsClass\" in the target name string, it will be replaced by \"torch\". e.g. 
_VariableFunctionsClass.relu would become torch.relu.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\passes\\tools_common.py", + "ast_data": "FunctionDef name:get_node_target arg:submodules arg:node arguments arg arg Compare Call If Compare Call Assign Assign Call Call Return return:yes Call If Compare Return return:yes BoolOp Compare Compare Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_and_write_registered_savers", + "source_code": "def _get_and_write_registered_savers(registered_trackables: Dict[str, List[_TrackableData]], object_graph_proto: trackable_object_graph_pb2.TrackableObjectGraph) -> Dict[str, Dict[str, base.Trackable]]:\n registered_savers = collections.defaultdict(dict)\n for saver_name, trackables in registered_trackables.items():\n for td in trackables:\n registered_savers[saver_name][td.object_name] = td.object_to_save\n object_proto = object_graph_proto.nodes[td.node_id]\n object_proto.registered_saver.name = saver_name\n object_proto.registered_saver.object_name = td.object_name\n return registered_savers", + "docstring": "Generates dictionary of registered savers and updates the proto.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py", + "ast_data": "FunctionDef name:_get_and_write_registered_savers arg:registered_trackables arg:object_graph_proto arguments arg arg Assign Call For Call For Assign Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get_all_sharing_strategies", + "source_code": "def get_all_sharing_strategies():\n return _all_sharing_strategies", + "docstring": "Return a set of sharing strategies supported on a current system.", + "type": "function", + "file_path": "pytorch\\torch\\multiprocessing\\__init__.py", + "ast_data": "FunctionDef name:get_all_sharing_strategies arguments Return return:yes" + }, + { + "library": "django", + "name": "check_geom", + "source_code": "def check_geom(result, func, cargs):\n if isinstance(result, int):\n result = c_void_p(result)\n if not result:\n raise GDALException('Invalid geometry pointer returned from \"%s\".' % func.__name__)\n return result", + "docstring": "Check a function that returns a geometry.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py", + "ast_data": "FunctionDef name:check_geom arg:result arg:func arg:cargs arguments arg arg arg If Call Assign Call If Raise Call Return return:yes" + }, + { + "library": "pandas", + "name": "_construct_result", + "source_code": "def _construct_result(self, result, other) -> DataFrame:\n out = self._constructor(result, copy=False).__finalize__(self)\n out.columns = self.columns\n out.index = self.index\n out = out.__finalize__(other)\n return out", + "docstring": "Wrap the result of an arithmetic, comparison, or logical operation. 
Parameters ---------- result : DataFrame Returns ------- DataFrame", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_construct_result arg:self arg:result arg:other arguments arg arg arg Assign Call Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, dataset, coordinator):\n if isinstance(dataset, input_lib.DistributedDataset):\n original_dataset = dataset._original_dataset\n serialized = serialize_dataset_to_graph(original_dataset)\n\n def dataset_fn():\n deserialized = deserialize_dataset_from_graph(serialized, original_dataset.element_spec)\n dataset.build(dataset_to_replace=deserialized)\n return dataset\n elif isinstance(dataset, input_lib.DistributedDatasetsFromFunction):\n\n def dataset_fn():\n dataset.build()\n return dataset\n elif isinstance(dataset, dataset_ops.Dataset):\n serialized = serialize_dataset_to_graph(dataset)\n\n def dataset_fn():\n return deserialize_dataset_from_graph(serialized, dataset.element_spec)\n else:\n raise ValueError('Unexpected dataset type!')\n super(PerWorkerDatasetFromDataset, self).__init__(dataset_fn, coordinator)", + "docstring": "Makes an iterable from datasets created by the given dataset. It creates a dataset_fn which deserializes a dataset from a graph under the hood. Args: dataset: A tf.data.Dataset, a DistributedDataset or a DistributedDatasetsFromFunction coordinator: a object, used to create dataset resources.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dataset arg:coordinator arguments arg arg arg If Call Assign Assign Call FunctionDef name:dataset_fn arguments Assign Call Call Return return:yes If Call FunctionDef name:dataset_fn arguments Call Return return:yes If Call Assign Call FunctionDef name:dataset_fn arguments Return return:yes Call Raise Call Call Call" + }, + { + "library": "pandas", + "name": "_is_type_compatible", + "source_code": "def _is_type_compatible(a, b) -> bool:\n is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))\n is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))\n return is_number(a) and is_number(b) or (is_ts_compat(a) and is_ts_compat(b)) or (is_td_compat(a) and is_td_compat(b)) or com.any_none(a, b)", + "docstring": "Helper for interval_range to check type compat of start/end/freq.", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexes\\interval.py", + "ast_data": "FunctionDef name:_is_type_compatible arg:a arg:b arguments arg arg Assign arguments arg Call Assign arguments arg Call Return return:yes BoolOp BoolOp Call Call BoolOp Call Call BoolOp Call Call Call" + }, + { + "library": "scrapy", + "name": "getpriority", + "source_code": "def getpriority(self, name: _SettingsKeyT) -> int | None:\n if name not in self:\n return None\n return self.attributes[name].priority", + "docstring": "Return the current numerical priority value of a setting, or `` does not exist. 
:param name: the setting name :type name: str", + "type": "method", + "file_path": "scrapy\\scrapy\\settings\\__init__.py", + "ast_data": "FunctionDef name:getpriority arg:self arg:name arguments arg arg If Compare Return return:no Return return:yes" + }, + { + "library": "kornia", + "name": "_get_nms_kernel2d", + "source_code": "def _get_nms_kernel2d(kx: int, ky: int) -> Tensor:\n numel: int = ky * kx\n center: int = numel // 2\n weight = eye(numel)\n weight[center, center] = 0\n return weight.view(numel, 1, ky, kx)", + "docstring": "Return neigh2channels conv kernel.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\subpix\\nms.py", + "ast_data": "FunctionDef name:_get_nms_kernel2d arg:kx arg:ky arguments arg arg Assign Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "CELU", + "source_code": "class CELU(Module):\n __constants__ = ['alpha', 'inplace']\n alpha: float\n inplace: bool\n\n def __init__(self, alpha: float=1.0, inplace: bool=False) -> None:\n super().__init__()\n self.alpha = alpha\n self.inplace = inplace\n\n def forward(self, input: Tensor) -> Tensor:\n return F.celu(input, self.alpha, self.inplace)\n\n def extra_repr(self) -> str:\n inplace_str = ', inplace=True' if self.inplace else ''\n return f'alpha={self.alpha}{inplace_str}'", + "docstring": "Applies the CELU function element-wise. .. math:: \\text{CELU}(x) = \\max(0,x) + \\min(0, \\alpha * (\\exp(x/\\alpha) - 1)) More details can be found in the paper _ . Args: alpha: the :math: value for the CELU formulation. Default: 1.0 inplace: can optionally do the operation in-place. Default: `(*)*(*)Continuously Differentiable Exponential Linear Units`:", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:CELU Assign FunctionDef name:__init__ arg:self arg:alpha arg:inplace arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "add_constraint", + "source_code": "def add_constraint(self, constraint: Callable):\n self.constraints.append(constraint)\n self._validated = False", + "docstring": "Adds a constraint into the current list of constraints.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py", + "ast_data": "FunctionDef name:add_constraint arg:self arg:constraint arguments arg arg Call Assign" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit_transform(self, X, y=None):\n self._fit_transform(X)\n return self.embedding_", + "docstring": "Fit the model from data in X and transform X. Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree} Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. 
Returns ------- X_new : array-like, shape (n_samples, n_components) X transformed in the new space.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\manifold\\_isomap.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "print_png", + "source_code": "def print_png(self, fname_or_fh, **kwargs):\n converter = make_pdf_to_png_converter()\n with TemporaryDirectory() as tmpdir:\n tmppath = pathlib.Path(tmpdir)\n pdf_path = tmppath / 'figure.pdf'\n png_path = tmppath / 'figure.png'\n self.print_pdf(pdf_path, **kwargs)\n converter(pdf_path, png_path, dpi=self.figure.dpi)\n with png_path.open('rb') as orig, cbook.open_file_cm(fname_or_fh, 'wb') as dest:\n shutil.copyfileobj(orig, dest)", + "docstring": "Use LaTeX to compile a pgf figure to pdf and convert it to png.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py", + "ast_data": "FunctionDef name:print_png arg:self arg:fname_or_fh arguments arg arg arg Assign Call With Call Assign Call Assign Assign Call Call With Call Call Call" + }, + { + "library": "tensorflow", + "name": "_SparseSegmentReduceGradV2", + "source_code": "def _SparseSegmentReduceGradV2(op, grad, norm=None):\n assert norm is None or norm == 'mean' or norm == 'sqrtn'\n indices = op.inputs[1]\n segment_ids = op.inputs[2]\n data_shape = array_ops.shape(op.inputs[0])\n dense_output_dim0 = data_shape[0]\n if norm == 'mean':\n grad_fn = math_ops.sparse_segment_mean_grad_v2\n elif norm == 'sqrtn':\n grad_fn = math_ops.sparse_segment_sqrt_n_grad_v2\n else:\n grad_fn = math_ops.sparse_segment_sum_grad_v2\n grad_values, sorted_unique_indices = grad_fn(grad, indices, segment_ids, dense_output_dim0)\n return indexed_slices_lib.IndexedSlices(grad_values, sorted_unique_indices, data_shape)", + "docstring": "Sparse gradient for SparseSegment(Sum|Mean|SqrtN)[WithNumSegments].", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_SparseSegmentReduceGradV2 arg:op arg:grad arg:norm arguments arg arg arg BoolOp Compare Compare Compare Assign Assign Assign Call Assign If Compare Assign If Compare Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "codegen_boilerplate", + "source_code": "def codegen_boilerplate(self, heuristic_name, opt_name, threshold, shared_memory, device_capa, classes):\n boiler_plate = f\"# flake8: noqa: B950\\n# fmt: off\\n# This file was generated by AutoHeuristic. 
Do not modify it manually!\\n# To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/{opt_name}/\\nfrom typing import Optional\\n\\nfrom torch._inductor.autoheuristic.autoheuristic_utils import (\\n AHContext,\\n AHMetadata,\\n Choice,\\n)\\nfrom torch._inductor.autoheuristic.learnedheuristic_interface import (\\n LearnedHeuristicDecision,\\n)\\n\\n\\nclass {heuristic_name}(LearnedHeuristicDecision):\\n\\n def __init__(self) -> None:\\n self.choices: list[Choice] = []\\n self.fill_choices()\\n\\n{self.gen_precondition(opt_name, shared_memory, device_capa)}\\n\\n def get_confidence_threshold(self) -> float:\\n return {threshold}\\n\\n def get_choice(self, idx: int) -> Optional[str]:\\n if idx < len(self.choices):\\n return self.choices[idx]\\n return None\\n\\n def fill_choices(self) -> None:\\n{self.gen_classes(classes, num_spaces=8)}\\n\\n def get_name(self) -> str:\\n return '{opt_name}'\"\n return boiler_plate", + "docstring": "Generates the boilerplate code for the generated heuristic. This includes things like imports, class definition, etc.", + "type": "method", + "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", + "ast_data": "FunctionDef name:codegen_boilerplate arg:self arg:heuristic_name arg:opt_name arg:threshold arg:shared_memory arg:device_capa arg:classes arguments arg arg arg arg arg arg arg Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "dispatch_ufunc_with_out", + "source_code": "def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):\n out = kwargs.pop('out')\n where = kwargs.pop('where', None)\n result = getattr(ufunc, method)(*inputs, **kwargs)\n if result is NotImplemented:\n return NotImplemented\n if isinstance(result, tuple):\n if not isinstance(out, tuple) or len(out) != len(result):\n raise NotImplementedError\n for arr, res in zip(out, result):\n _assign_where(arr, res, where)\n return out\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n else:\n raise NotImplementedError\n _assign_where(out, result, where)\n return out", + "docstring": "If we have an keyword, then call the ufunc without and then set the result into the given .", + "type": "function", + "file_path": "pandas\\pandas\\core\\arraylike.py", + "ast_data": "FunctionDef name:dispatch_ufunc_with_out arg:self arg:ufunc arg:method arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call If Compare Return return:yes If Call If BoolOp Call Compare Call Call Raise For Call Call Return return:yes If Call If Compare Call Assign Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "Dropout2d", + "source_code": "class Dropout2d(_DropoutNd):\n\n def forward(self, input: Tensor) -> Tensor:\n return F.dropout2d(input, self.p, self.training, self.inplace)", + "docstring": "Randomly zero out entire channels. A channel is a 2D feature map, e.g., the :math:-th channel of the :math:-th sample in the batched input is a 2D tensor :math:. Each channel will be zeroed out independently on every forward call with probability :attr: using samples from a Bernoulli distribution. Usually the input comes from :class: modules. As described in the paper _ , if adjacent pixels within feature maps are strongly correlated (as is normally the case in early convolution layers) then i.i.d. dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. 
In this case, :func: will help promote independence between feature maps and should be used instead. Args: p (float, optional): probability of an element to be zero-ed. inplace (bool, optional): If set to `nn.Dropout1d(C, H, W)nn.Dropout1d(N, C, H, W)(N, C, L)(N, C, H, W)(N, C, L)` (same shape as input). Examples:: >>> m = nn.Dropout2d(p=0.2) >>> input = torch.randn(20, 16, 32, 32) >>> output = m(input) .. _Efficient Object Localization Using Convolutional Networks:", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\dropout.py", + "ast_data": "ClassDef name:Dropout2d FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "Gulf", + "source_code": "class Gulf(Benchmark):\n\n def __init__(self, dimensions=3):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0.0] * self.N, [50.0] * self.N))\n self.global_optimum = [[50.0, 25.0, 1.5]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n m = 99.0\n i = arange(1.0, m + 1)\n u = 25 + (-50 * log(i / 100.0)) ** (2 / 3.0)\n vec = exp(-(abs(u - x[1]) ** x[2] / x[0])) - i / 100.0\n return sum(vec ** 2)", + "docstring": "Gulf objective function. This class defines the Gulf [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Gulf}}(x) = \\sum_{i=1}^99 \\left( e^{-\\frac{\\lvert y_i - x_2 \\rvert^{x_3}}{x_1}} - t_i \\right) Where, in this exercise: .. math:: t_i = i/100 \\\\ y_i = 25 + [-50 \\log(t_i)]^{2/3} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO Gavana has absolute of (u - x[1]) term. Jamil doesn't... Leaving it in.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_G.py", + "ast_data": "ClassDef name:Gulf FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_IntegerInterval", + "source_code": "class _IntegerInterval(Constraint):\n is_discrete = True\n\n def __init__(self, lower_bound, upper_bound):\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n super().__init__()\n\n def check(self, value):\n return (value % 1 == 0) & (self.lower_bound <= value) & (value <= self.upper_bound)\n\n def __repr__(self):\n fmt_string = self.__class__.__name__[1:]\n fmt_string += f'(lower_bound={self.lower_bound}, upper_bound={self.upper_bound})'\n return fmt_string", + "docstring": "Constrain to an integer interval .", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:_IntegerInterval Assign FunctionDef name:__init__ arg:self arg:lower_bound arg:upper_bound arguments arg arg arg Assign Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare Compare Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "ensure_tempdir", + "source_code": "def ensure_tempdir(builder: Builder) -> Path:\n if not hasattr(builder, '_imgmath_tempdir'):\n builder._imgmath_tempdir = Path(tempfile.mkdtemp())\n return builder._imgmath_tempdir", + "docstring": "Create temporary directory. 
use only one tempdir per build -- the use of a directory is cleaner than using temporary files, since we can clean up everything at once just removing the whole directory (see cleanup_tempdir)", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\imgmath.py", + "ast_data": "FunctionDef name:ensure_tempdir arg:builder arguments arg If Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "state_dict", + "source_code": "def state_dict(self) -> dict[str, Any]:\n groups: list[dict[str, Any]] = [dict(filter(lambda key_value: key_value[0] not in KEYS_NOT_IN_STATE_DICT, mg.items())) for mg in self.groups]\n return {'state': self.state, 'groups': groups}", + "docstring": "Returns the state of the optimizer as a :class:`dict`. It contains: * state - current state of the sparsification. * groups - a list containing all sparsity configuration groups with the key 'tensor_fqn' specifying the path to the sparsified tensor within a model TODO: Need a clean way of loading the state of the \"prepared\" module", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\base_sparsifier.py", + "ast_data": "FunctionDef name:state_dict arg:self arguments arg Call Call arguments arg Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_raw_feature_as_tensor", + "source_code": "def _get_raw_feature_as_tensor(self, key):\n raw_feature = self._features[key]\n feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(raw_feature)\n\n def expand_dims(input_tensor):\n if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n return sparse_ops.sparse_reshape(input_tensor, [array_ops.shape(input_tensor)[0], 1])\n else:\n return array_ops.expand_dims(input_tensor, -1)\n rank = feature_tensor.get_shape().ndims\n if rank is not None:\n if rank == 0:\n raise ValueError('Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))\n return feature_tensor if rank != 1 else expand_dims(feature_tensor)\n with ops.control_dependencies([check_ops.assert_positive(array_ops.rank(feature_tensor), message='Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))]):\n return cond.cond(math_ops.equal(1, array_ops.rank(feature_tensor)), lambda: expand_dims(feature_tensor), lambda: feature_tensor)", + "docstring": "Gets the raw_feature (keyed by `key`) as `tensor`. The raw feature is converted to (sparse) tensor and maybe expand dim. For both `SparseTensor` and `Tensor`, the rank will be expanded (to 2) if the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will error out as it is not supported. Args: key: A key to access the raw feature. Returns: A `Tensor` or `SparseTensor`. 
Raises: ValueError: if the raw feature has rank 0.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_get_raw_feature_as_tensor arg:self arg:key arguments arg arg Assign Assign Call FunctionDef name:expand_dims arg:input_tensor arguments arg If Call Return return:yes Call Call Return return:yes Call Assign Call If Compare If Compare Raise Call Call Return return:yes Compare Call With Call Call Call Call Return return:yes Call Call Call arguments Call arguments" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, xy, width, height, *, angle=0.0, theta1=0.0, theta2=360.0, **kwargs):\n fill = kwargs.setdefault('fill', False)\n if fill:\n raise ValueError('Arc objects cannot be filled')\n super().__init__(xy, width, height, angle=angle, **kwargs)\n self.theta1 = theta1\n self.theta2 = theta2\n self._theta1, self._theta2, self._stretched_width, self._stretched_height = self._theta_stretch()\n self._path = Path.arc(self._theta1, self._theta2)", + "docstring": "Parameters ---------- xy : (float, float) The center of the ellipse. width : float The length of the horizontal axis. height : float The length of the vertical axis. angle : float Rotation of the ellipse in degrees (counterclockwise). theta1, theta2 : float, default: 0, 360 Starting and ending angles of the arc in degrees. These values are relative to *angle*, e.g. if *angle* = 45 and *theta1* = 90 the absolute starting angle is 135. Default *theta1* = 0, *theta2* = 360, i.e. a complete ellipse. The arc is drawn in the counterclockwise direction. Angles greater than or equal to 360, or smaller than 0, are represented by an equivalent angle in the range [0, 360), by taking the input value mod 360. Other Parameters ---------------- **kwargs : properties Most properties are supported as keyword arguments, except *fill* and *facecolor* because filling is not supported. %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:width arg:height arguments arg arg arg arg arg arg arg arg Assign Call If Raise Call Call Call Assign Assign Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "mean_relative_error", + "source_code": "@tf_export(v1=['metrics.mean_relative_error'])\ndef mean_relative_error(labels, predictions, normalizer, weights=None, metrics_collections=None, updates_collections=None, name=None):\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.mean_relative_error is not supported when eager execution is enabled.')\n predictions, labels, weights = _remove_squeezable_dimensions(predictions=predictions, labels=labels, weights=weights)\n predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(predictions, normalizer)\n predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())\n relative_errors = array_ops.where(math_ops.equal(normalizer, 0.0), array_ops.zeros_like(labels), math_ops.divide(math_ops.abs(labels - predictions), normalizer))\n return mean(relative_errors, weights, metrics_collections, updates_collections, name or 'mean_relative_error')", + "docstring": "Computes the mean relative error by normalizing with the given values. The function creates two local variables, and that are used to compute the mean relative absolute error. 
This average is weighted by `weights`, and it is ultimately returned as `mean_relative_error`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_relative_error`. Internally, a `relative_errors` operation divides the absolute value of the differences between `predictions` and `labels` by the `normalizer`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `relative_errors`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. normalizer: A `Tensor` of the same shape as `predictions`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_relative_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_relative_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_relative_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:mean_relative_error arg:labels arg:predictions arg:normalizer arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg arg If Call Raise Call Assign Call Assign Call Call Call Call Assign Call Call Call Call Call Return return:yes Call BoolOp Call" + }, + { + "library": "scikit-learn", + "name": "bhtsne", + "source_code": "def bhtsne(X):\n n_iter = -1\n return (run_bh_tsne(X, use_pca=False, perplexity=args.perplexity, verbose=args.verbose > 0), n_iter)", + "docstring": "Wrapper for the reference lvdmaaten/bhtsne implementation.", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_tsne_mnist.py", + "ast_data": "FunctionDef name:bhtsne arg:X arguments arg Assign Return return:yes Call Compare" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, widthA=1.0, angleA=0, widthB=1.0, angleB=0):\n super().__init__(widthA=widthA, lengthA=0, angleA=angleA, widthB=widthB, lengthB=0, angleB=angleB)", + "docstring": "Parameters ---------- widthA, widthB : float, default: 1.0 Width of the bracket. angleA, angleB : float, default: 0 degrees Orientation of the bracket, as a counterclockwise angle. 0 degrees means perpendicular to the line.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:widthA arg:angleA arg:widthB arg:angleB arguments arg arg arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "_init_local_init_op", + "source_code": "def _init_local_init_op(self, local_init_op=USE_DEFAULT):\n if local_init_op is Supervisor.USE_DEFAULT:\n local_init_op = self._get_first_op_from_collection(ops.GraphKeys.LOCAL_INIT_OP)\n if local_init_op is None:\n op_list = [variables.local_variables_initializer(), lookup_ops.tables_initializer()]\n if op_list:\n local_init_op = control_flow_ops.group(*op_list)\n ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)\n self._local_init_op = local_init_op", + "docstring": "Initializes local_init_op. 
Args: local_init_op: run for every new supervisor instance. If set to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP collection. If the collection is empty, create an op that initializes all local variables and all tables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "FunctionDef name:_init_local_init_op arg:self arg:local_init_op arguments arg arg If Compare Assign Call If Compare Assign Call Call If Assign Call Call Assign" + }, + { + "library": "kornia", + "name": "AverageMeter", + "source_code": "class AverageMeter:\n val: Union[float, bool, Tensor]\n _avg: Union[float, Tensor]\n sum: Union[float, Tensor]\n count: int\n\n def __init__(self) -> None:\n self.reset()\n\n def reset(self) -> None:\n self.val = 0\n self._avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val: Union[float, bool, Tensor], n: int=1) -> None:\n self.val = val\n self.sum += val * n\n self.count += n\n self._avg = self.sum / self.count\n\n @property\n def avg(self) -> float:\n if isinstance(self._avg, Tensor):\n return float(self._avg.item())\n return self._avg", + "docstring": "Computes and stores the average and current value. Example: >>> stats = AverageMeter() >>> acc1 = torch.tensor(0.99) # coming from K.metrics.accuracy >>> stats.update(acc1, n=1) # where n is batch size usually >>> round(stats.avg, 2) 0.99", + "type": "class", + "file_path": "kornia\\kornia\\metrics\\average_meter.py", + "ast_data": "ClassDef name:AverageMeter FunctionDef name:__init__ arg:self arguments arg Call FunctionDef name:reset arg:self arguments arg Assign Assign Assign Assign FunctionDef name:update arg:self arg:val arg:n arguments arg arg arg Assign Assign FunctionDef name:avg arg:self arguments arg If Call Return return:yes Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "rebuild_auth", + "source_code": "def rebuild_auth(self, prepared_request, response):\n if 'Authorization' in prepared_request.headers:\n prepared_request.headers.pop('Authorization', True)\n prepared_request.prepare_auth(self.auth)", + "docstring": "When being redirected we should always strip Authorization header, since nonce may not be reused as per OAuth spec.", + "type": "method", + "file_path": "authlib\\authlib\\integrations\\requests_client\\oauth1_session.py", + "ast_data": "FunctionDef name:rebuild_auth arg:self arg:prepared_request arg:response arguments arg arg arg If Compare Call Call" + }, + { + "library": "pytorch", + "name": "module_class", + "source_code": "@property\ndef module_class(self) -> type | str | None:\n return self.top()._module_class", + "docstring": "Returns the module class of the top module.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", + "ast_data": "FunctionDef name:module_class arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "validate_metadata", + "source_code": "def validate_metadata(self, handler: AppendableTable) -> None:\n if self.meta == 'category':\n new_metadata = self.metadata\n cur_metadata = handler.read_metadata(self.cname)\n if new_metadata is not None and cur_metadata is not None and (not array_equivalent(new_metadata, cur_metadata, strict_nan=True, dtype_equal=True)):\n raise ValueError('cannot append a categorical with different categories to the existing')", + "docstring": "validate that kind=category does not change the categories", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", 
+ "ast_data": "FunctionDef name:validate_metadata arg:self arg:handler arguments arg arg If Compare Assign Assign Call If BoolOp Compare Compare Call Raise Call" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "@override\ndef step(self, epoch=None) -> None:\n if epoch is None and self.last_epoch < 0:\n epoch = 0\n if epoch is None:\n epoch = self.last_epoch + 1\n self.T_cur = self.T_cur + 1\n if self.T_cur >= self.T_i:\n self.T_cur = self.T_cur % self.T_i\n self.T_i = self.T_i * self.T_mult\n else:\n if epoch < 0:\n raise ValueError(f'Expected non-negative epoch, but got {epoch}')\n if epoch >= self.T_0:\n if self.T_mult == 1:\n self.T_cur = epoch % self.T_0\n else:\n n = int(math.log(epoch / self.T_0 * (self.T_mult - 1) + 1, self.T_mult))\n self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)\n self.T_i = self.T_0 * self.T_mult ** n\n else:\n self.T_i = self.T_0\n self.T_cur = epoch\n self.last_epoch = math.floor(epoch)\n with _enable_get_lr_call(self):\n for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):\n param_group['lr'] = lr\n self._last_lr = [group['lr'] for group in self.optimizer.param_groups]", + "docstring": "Step could be called after every batch update. Example: >>> # xdoctest: +SKIP(\"Undefined vars\") >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) >>> iters = len(dataloader) >>> for epoch in range(20): >>> for i, sample in enumerate(dataloader): >>> inputs, labels = sample['inputs'], sample['labels'] >>> optimizer.zero_grad() >>> outputs = net(inputs) >>> loss = criterion(outputs, labels) >>> loss.backward() >>> optimizer.step() >>> scheduler.step(epoch + i / iters) This function can be called in an interleaved way. Example: >>> # xdoctest: +SKIP(\"Undefined vars\") >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) >>> for epoch in range(20): >>> scheduler.step() >>> scheduler.step(26) >>> scheduler.step() # scheduler.step(27), instead of scheduler(20)", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:step arg:self arg:epoch arguments arg arg If BoolOp Compare Compare Assign If Compare Assign Assign If Compare Assign Assign If Compare Raise Call If Compare If Compare Assign Assign Call Call Assign Assign Assign Assign Assign Call With Call For Call Call Assign Assign" + }, + { + "library": "numpy", + "name": "array_repr", + "source_code": "@array_function_dispatch(_array_repr_dispatcher, module='numpy')\ndef array_repr(arr, max_line_width=None, precision=None, suppress_small=None):\n return _array_repr_implementation(arr, max_line_width, precision, suppress_small)", + "docstring": "Return the string representation of an array. Parameters ---------- arr : ndarray Input array. max_line_width : int, optional Inserts newlines if text is longer than . Defaults to ``. Returns ------- string : str The string representation of an array. See Also -------- array_str, array2string, set_printoptions Examples -------- >>> import numpy as np >>> np.array_repr(np.array([1,2])) 'array([1, 2])' >>> np.array_repr(np.ma.array([0.])) 'MaskedArray([0.])' >>> np.array_repr(np.array([], np.int32)) 'array([], dtype=int32)' >>> x = np.array([1e-6, 4e-7, 2, 3]) >>> np.array_repr(x, precision=6, suppress_small=True) 'array([0.000001, 0. , 2. , 3. 
])'", + "type": "function", + "file_path": "numpy\\numpy\\_core\\arrayprint.py", + "ast_data": "FunctionDef name:array_repr arg:arr arg:max_line_width arg:precision arg:suppress_small arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "dense_shape", + "source_code": "@property\ndef dense_shape(self):\n return self._dense_shape", + "docstring": "A 1-D containing the shape of the corresponding dense tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py", + "ast_data": "FunctionDef name:dense_shape arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "AbsoluteError", + "source_code": "class AbsoluteError(BaseLoss):\n differentiable = False\n need_update_leaves_values = True\n\n def __init__(self, sample_weight=None):\n super().__init__(closs=CyAbsoluteError(), link=IdentityLink())\n self.approx_hessian = True\n self.constant_hessian = sample_weight is None\n\n def fit_intercept_only(self, y_true, sample_weight=None):\n if sample_weight is None:\n return np.median(y_true, axis=0)\n else:\n return _weighted_percentile(y_true, sample_weight, 50)", + "docstring": "Absolute error with identity link, for regression. Domain: y_true and y_pred all real numbers Link: y_pred = raw_prediction For a given sample x_i, the absolute error is defined as:: loss(x_i) = |y_true_i - raw_prediction_i| Note that the exact hessian = 0 almost everywhere (except at one point, therefore differentiable = False). Optimization routines like in HGBT, however, need a hessian > 0. Therefore, we assign 1.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "ClassDef name:AbsoluteError Assign Assign FunctionDef name:__init__ arg:self arg:sample_weight arguments arg arg Call Call Call Call Assign Assign Compare FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "sdp_kernel", + "source_code": "@contextlib.contextmanager\n@deprecated('`torch.backends.cuda.sdp_kernel()` is deprecated. In the future, this context manager will be removed. Please see `torch.nn.attention.sdpa_kernel()` for the new context manager, with updated signature.', category=FutureWarning)\ndef sdp_kernel(enable_flash: bool=True, enable_math: bool=True, enable_mem_efficient: bool=True, enable_cudnn: bool=True):\n from torch.nn.attention import sdpa_kernel\n backend_list = []\n if enable_flash:\n backend_list.append(SDPBackend.FLASH_ATTENTION)\n if enable_mem_efficient:\n backend_list.append(SDPBackend.EFFICIENT_ATTENTION)\n if enable_math:\n backend_list.append(SDPBackend.MATH)\n if enable_cudnn:\n backend_list.append(SDPBackend.CUDNN_ATTENTION)\n with sdpa_kernel(backend_list) as context:\n try:\n yield context\n finally:\n pass", + "docstring": ".. warning:: This flag is beta and subject to change. This context manager can be used to temporarily enable or disable any of the three backends for scaled dot product attention. 
Upon exiting the context manager, the previous state of the flags will be restored.", + "type": "function", + "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py", + "ast_data": "FunctionDef name:sdp_kernel arg:enable_flash arg:enable_math arg:enable_mem_efficient arg:enable_cudnn arguments arg arg arg arg Assign If Call If Call If Call If Call With Call Try Call" + }, + { + "library": "pytorch", + "name": "load_state_dict", + "source_code": "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", + "docstring": "Loads the schedulers state. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\scheduler\\base_scheduler.py", + "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Call" + }, + { + "library": "scipy", + "name": "Ratkowsky01", + "source_code": "class Ratkowsky01(Benchmark):\n\n def __init__(self, dimensions=4):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0.0, 1.0, 0.0, 0.1], [1000, 20.0, 3.0, 6.0]))\n self.global_optimum = [[699.6415127, 5.2771253025, 0.75962938329, 1.2792483859]]\n self.fglob = 8786.404908\n self.a = asarray([16.08, 33.83, 65.8, 97.2, 191.55, 326.2, 386.87, 520.53, 590.03, 651.92, 724.93, 699.56, 689.96, 637.56, 717.41])\n self.b = arange(1, 16.0)\n\n def fun(self, x, *args):\n self.nfev += 1\n vec = x[0] / (1 + exp(x[1] - x[2] * self.b)) ** (1 / x[3])\n return sum((self.a - vec) ** 2)", + "docstring": "Ratkowsky objective function. .. [1]", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py", + "ast_data": "ClassDef name:Ratkowsky01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_get_aspect_ratio", + "source_code": "def _get_aspect_ratio(self):\n figure_size = self.get_figure().get_size_inches()\n ll, ur = self.get_position() * figure_size\n width, height = ur - ll\n return height / (width * self.get_data_ratio())", + "docstring": "Convenience method to calculate the aspect ratio of the Axes in the display coordinate system.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:_get_aspect_ratio arg:self arguments arg Assign Call Call Assign Call Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "Problem09", + "source_code": "class Problem09(Benchmark):\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n self._bounds = [(3.1, 20.4)]\n self.global_optimum = 17.039\n self.fglob = -1.90596\n\n def fun(self, x, *args):\n self.nfev += 1\n x = x[0]\n return sin(x) + sin(2.0 / 3.0 * x)", + "docstring": "Univariate Problem09 objective function. This class defines the Univariate Problem09 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem09}}(x) = \\sin(x) + \\sin \\left(\\frac{2}{3} x \\right) Bound constraints: :math: .. 
figure:: figures/Problem09.png :alt: Univariate Problem09 function :align: center **Univariate Problem09 function** *Global optimum*: :math:`f(x) = -1.90596` for :math:`x = 17.039`", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", + "ast_data": "ClassDef name:Problem09 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "masked_less_equal", + "source_code": "def masked_less_equal(x, value, copy=True):\n return masked_where(less_equal(x, value), x, copy=copy)", + "docstring": "Mask an array where less than or equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x <= value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_less_equal(a, 2) masked_array(data=[--, --, --, 3], mask=[ True, True, True, False], fill_value=999999)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:masked_less_equal arg:x arg:value arg:copy arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_gotitem", + "source_code": "def _gotitem(self, key, ndim: int, subset=None):\n raise AbstractMethodError(self)", + "docstring": "sub-classes to define return a sliced object Parameters ---------- key : str / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on", + "type": "method", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "FunctionDef name:_gotitem arg:self arg:key arg:ndim arg:subset arguments arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_ReduceGradientArgs", + "source_code": "def _ReduceGradientArgs(x, y, gx, gy):\n if gx is not None or gy is not None:\n bx, by = SmartBroadcastGradientArgs(x, y)\n gx = _ReduceGradientArg(gx, bx)\n gy = _ReduceGradientArg(gy, by)\n return (gx, gy)", + "docstring": "Reduces gradients of both arguments of a broadcasting binary op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_ReduceGradientArgs arg:x arg:y arg:gx arg:gy arguments arg arg arg arg If BoolOp Compare Compare Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_edgecolor", + "source_code": "def set_edgecolor(self, color):\n self.patch.set_edgecolor(color)", + "docstring": "Set the edge color of the Figure rectangle. Parameters ---------- color : :mpltype:`color`", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:set_edgecolor arg:self arg:color arguments arg arg Call" + }, + { + "library": "matplotlib", + "name": "get_stretch", + "source_code": "def get_stretch(self):\n return self._fontproperties.get_stretch()", + "docstring": "Return the font stretch as a string or a number. See Also -------- .font_manager.FontProperties.get_stretch", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:get_stretch arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "to_zpk", + "source_code": "def to_zpk(self):\n return ZerosPolesGain(*tf2zpk(self.num, self.den), **self._dt_dict)", + "docstring": "Convert system representation to `ZerosPolesGain`. 
Returns ------- sys : instance of Zeros, poles, gain representation of the current system", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:to_zpk arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_inter_op_parallelism_threads", + "source_code": "@tf_export('config.threading.get_inter_op_parallelism_threads')\ndef get_inter_op_parallelism_threads():\n return context.context().inter_op_parallelism_threads", + "docstring": "Get number of threads used for parallelism between independent operations. Determines the number of threads used by independent non-blocking operations. 0 means the system picks an appropriate number. Returns: Number of parallel threads", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:get_inter_op_parallelism_threads arguments Return return:yes Call Call" + }, + { + "library": "django", + "name": "save_form", + "source_code": "def save_form(self, request, form, change):\n return form.save(commit=False)", + "docstring": "Given a ModelForm return an unsaved instance. `` is True if the object is being changed, and False if it's being added.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:save_form arg:self arg:request arg:form arg:change arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "create_node", + "source_code": "@compatibility(is_backward_compatible=True)\ndef create_node(self, kind: str, target: Target, args: tuple[Argument, ...], kwargs: dict[str, Argument], name: Optional[str]=None, type_expr: Optional[Any]=None) -> Node:\n if kind == 'call_function' and self.check_mutable_operations:\n check_for_mutable_operation(target, args, kwargs)\n node = self.graph.create_node(kind, target, args, kwargs, name, type_expr)\n self.node_name_to_scope[node.name] = (self.scope.module_path, self.scope.module_type)\n if fx_traceback.has_preserved_node_meta():\n current_meta: dict[str, Any] = fx_traceback.get_current_meta()\n stack_trace = current_meta.get('stack_trace')\n if stack_trace:\n node.stack_trace = stack_trace\n for field in _COPY_META_FIELDS:\n if field in current_meta:\n node.meta[field] = copy.copy(current_meta[field])\n new_seq_nr = torch.autograd._get_sequence_nr() - 1\n if current_meta.get('in_grad_fn', 0) > 0:\n new_seq_nr = current_meta['grad_fn_seq_nr'][-1]\n node.meta['seq_nr'] = new_seq_nr\n elif self.module_stack:\n node.meta['nn_module_stack'] = copy.copy(self.module_stack)\n log.debug('create_node %s', node)\n return node", + "docstring": "Inserts a graph node given target, args, kwargs, and name. This method can be overridden to do extra checking, validation, or modification of values used in node creation. 
For example, one might want to disallow in-place operations from being recorded.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\proxy.py", + "ast_data": "FunctionDef name:create_node arg:self arg:kind arg:target arg:args arg:kwargs arg:name arg:type_expr arguments arg arg arg arg arg arg arg If BoolOp Compare Call Assign Call Assign If Call Call Assign Call If Assign For If Compare Assign Call Assign Call If Compare Call Assign Assign If Assign Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_is_strictly_monotonic_decreasing", + "source_code": "@final\n@property\ndef _is_strictly_monotonic_decreasing(self) -> bool:\n return self.is_unique and self.is_monotonic_decreasing", + "docstring": "Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_is_strictly_monotonic_decreasing arg:self arguments arg Return return:yes BoolOp" + }, + { + "library": "tensorflow", + "name": "parents", + "source_code": "@property\ndef parents(self):\n return [self.key]", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py", + "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "generate_enqueue_ops", + "source_code": "def generate_enqueue_ops(self, sharded_inputs, tpu_ordinal_function=None, placement_function=None):\n self.set_configuration_from_sharded_input_tensors(sharded_inputs)\n self.freeze()\n if self._generated_enqueue_ops and (not ops.inside_function()):\n raise ValueError(\"Can't generate two enqueue Ops from the same queue\")\n self._generated_enqueue_ops = True\n if tpu_ordinal_function is None:\n tpu_ordinal_function = lambda index: -1\n name_prefix = '%s/enqueue' % self._name\n return [self._generate_enqueue_op(shard, name_prefix, index, tpu_ordinal=tpu_ordinal_function(index), device=placement_function(index) if placement_function else None) for shard, index in zip(sharded_inputs, range(self.number_of_shards))]", + "docstring": "Generates the host-side Ops to enqueue the shards of a tuple. sharded_inputs is a list, one for each shard, of lists of Tensors. sharded_inputs[i] is the tuple of Tensors to use to feed shard i of the queue. Returns the host-side Ops that must be run to enqueue the sharded tuple. The Op for shard i is colocated with the inputs for shard i. Implicitly freezes the queue configuration if it is not already frozen. If the configuration has already been frozen, and is not compatible with the types and shapes of sharded_inputs, an error will be raised. Args: sharded_inputs: a list of lists of Tensors. The length of the outer list determines the number of shards. Each inner list indicates the types and shapes of the tuples in the corresponding shard. tpu_ordinal_function: if not None, a function that takes the shard index as input and returns the ordinal of the TPU device the shard's infeed should be placed on. tpu_ordinal_function must be set if the inputs are placed on CPU devices. placement_function: if not None, a function that takes the shard index as input and returns the host device where the enqueue op should be placed on. 
Returns: A list of host-side Ops, one for each shard, that when executed together will enqueue a full-size element of infeed. Raises: ValueError: if the queue configuration has previously been frozen and the shapes of the elements of sharded_inputs are not compatible with the frozen configuration; or if the shapes of the elements of sharded_inputs don't form a consistent unsharded tuple; or if the elements of a tuple have different device constraints. TypeError: if the queue configuration has previously been frozen and the types of the elements of sharded_inputs are not compatible with the frozen configuration; or if the types of the elements of sharded_inputs don't form a consistent unsharded tuple.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", + "ast_data": "FunctionDef name:generate_enqueue_ops arg:self arg:sharded_inputs arg:tpu_ordinal_function arg:placement_function arguments arg arg arg arg Call Call If BoolOp Call Raise Call Assign If Compare Assign arguments arg Assign Return return:yes Call Call Call Call Call" + }, + { + "library": "kornia", + "name": "_normalize_input", + "source_code": "@staticmethod\ndef _normalize_input(x: torch.Tensor, eps: float=1e-06) -> torch.Tensor:\n sp, mp = torch.std_mean(x, dim=(-3, -2, -1), keepdim=True)\n return (x - mp.detach()) / (sp.detach() + eps)", + "docstring": "Normalize the input by batch.", + "type": "method", + "file_path": "kornia\\kornia\\feature\\affine_shape.py", + "ast_data": "FunctionDef name:_normalize_input arg:x arg:eps arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_hatch", + "source_code": "def get_hatch(self):\n return self._hatch", + "docstring": "Return the current hatching pattern.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:get_hatch arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_interval_max_min_ratio", + "source_code": "def _interval_max_min_ratio(data):\n diff = np.diff(np.sort(data))\n return diff.max() / diff.min()", + "docstring": "Compute the ratio between the largest and smallest inter-point distances. A value larger than 5 typically indicates that the parameter range would better be displayed with a log scale while a linear scale would be more suitable otherwise.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_plotting.py", + "ast_data": "FunctionDef name:_interval_max_min_ratio arg:data arguments arg Assign Call Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n return self._fit(X, partial=False)", + "docstring": "Build a CF Tree for the input data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. y : Ignored Not used, present here for API consistency by convention. 
Returns ------- self Fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "IntGaugeCell", + "source_code": "class IntGaugeCell(object):\n __slots__ = ['_cell']\n\n def __init__(self, cell):\n self._cell = cell\n\n def set(self, value):\n pywrap_tfe.TFE_MonitoringIntGaugeCellSet(self._cell, value)\n\n def value(self):\n return pywrap_tfe.TFE_MonitoringIntGaugeCellValue(self._cell)", + "docstring": "A single integer value stored in an .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", + "ast_data": "ClassDef name:IntGaugeCell Assign FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign FunctionDef name:set arg:self arg:value arguments arg arg Call FunctionDef name:value arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_weights", + "source_code": "def get_weights(self):\n weights = self.weights\n output_weights = []\n for weight in weights:\n if isinstance(weight, base_layer_utils.TrackableWeightHandler):\n output_weights.extend(weight.get_tensors())\n else:\n output_weights.append(weight)\n return backend.batch_get_value(output_weights)", + "docstring": "Returns the current weights of the layer, as NumPy arrays. The weights of a layer represent the state of the layer. This function returns both trainable and non-trainable weight values associated with this layer as a list of NumPy arrays, which can in turn be used to load state into similarly parameterized layers. For example, a layer returns a list of two values: the kernel matrix and the bias vector. These can be used to set the weights of another layer: >>> layer_a = tf.keras.layers.Dense(1, ... kernel_initializer=tf.constant_initializer(1.)) >>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]])) >>> layer_a.get_weights() [array([[1.], [1.], [1.]], dtype=float32), array([0.], dtype=float32)] >>> layer_b = tf.keras.layers.Dense(1, ... kernel_initializer=tf.constant_initializer(2.)) >>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]])) >>> layer_b.get_weights() [array([[2.], [2.], [2.]], dtype=float32), array([0.], dtype=float32)] >>> layer_b.set_weights(layer_a.get_weights()) >>> layer_b.get_weights() [array([[1.], [1.], [1.]], dtype=float32), array([0.], dtype=float32)] Returns: Weights values as a list of NumPy arrays.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:get_weights arg:self arguments arg Assign Assign For If Call Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "timetz", + "source_code": "@property\ndef timetz(self) -> npt.NDArray[np.object_]:\n return ints_to_pydatetime(self.asi8, self.tz, box='time', reso=self._creso)", + "docstring": "Returns numpy array of :class: objects with timezones. The time part of the Timestamps. See Also -------- DatetimeIndex.time : Returns numpy array of :class: objects. The time part of the Timestamps. DatetimeIndex.tz : Return the timezone. 
Examples -------- For Series: >>> s = pd.Series([\"1/1/2020 10:00:00+00:00\", \"2/1/2020 11:00:00+00:00\"]) >>> s = pd.to_datetime(s) >>> s 0 2020-01-01 10:00:00+00:00 1 2020-02-01 11:00:00+00:00 dtype: datetime64[s, UTC] >>> s.dt.timetz 0 10:00:00+00:00 1 11:00:00+00:00 dtype: object For DatetimeIndex: >>> idx = pd.DatetimeIndex( ... [\"1/1/2020 10:00:00+00:00\", \"2/1/2020 11:00:00+00:00\"] ... ) >>> idx.timetz array([datetime.time(10, 0, tzinfo=datetime.timezone.utc), datetime.time(11, 0, tzinfo=datetime.timezone.utc)], dtype=object)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py", + "ast_data": "FunctionDef name:timetz arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, layer_debug_metrics: Optional[Mapping[str, Callable[[np.ndarray], float]]]=None, model_debug_metrics: Optional[Mapping[str, Callable[[Sequence[np.ndarray], Sequence[np.ndarray]], float]]]=None, layer_direct_compare_metrics: Optional[Mapping[str, Callable[[Sequence[np.ndarray], Sequence[np.ndarray], float, int], float]]]=None, denylisted_ops: Optional[List[str]]=None, denylisted_nodes: Optional[List[str]]=None, fully_quantize: bool=False) -> None:\n self.layer_debug_metrics = layer_debug_metrics\n self.model_debug_metrics = model_debug_metrics\n self.layer_direct_compare_metrics = layer_direct_compare_metrics\n keys = []\n for metrics in [layer_debug_metrics, model_debug_metrics, layer_direct_compare_metrics]:\n if metrics is not None:\n keys.extend(metrics.keys())\n if len(keys) != len(set(keys)):\n raise ValueError('Provided metrics have duplicate keys.')\n self.denylisted_ops = denylisted_ops\n self.denylisted_nodes = denylisted_nodes\n self.fully_quantize = fully_quantize", + "docstring": "Initializes debugger options. Args: layer_debug_metrics: a dict to specify layer debug functions {function_name_str: function} where the function accepts result of NumericVerify Op, which is value difference between float and dequantized op results. The function returns single scalar value. model_debug_metrics: a dict to specify model debug functions {function_name_str: function} where the function accepts outputs from two models, and returns single scalar value for a metric. (e.g. accuracy, IoU) layer_direct_compare_metrics: a dict to specify layer debug functions {function_name_str: function}. The signature is different from that of , and this one gets passed (original float value, original quantized value, scale, zero point). The function's implementation is responsible for correctly dequantize the quantized value to compare. Use this one when comparing diff is not enough. (Note) quantized value is passed as int8, so cast to int32 is needed. denylisted_ops: a list of op names which is expected to be removed from quantization. denylisted_nodes: a list of op's output tensor names to be removed from quantization. fully_quantize: Bool indicating whether to fully quantize the model. Besides model body, the input/output will be quantized as well. Corresponding to mlir_quantize's fully_quantize parameter. 
Raises: ValueError: when there are duplicate keys", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:layer_debug_metrics arg:model_debug_metrics arg:layer_direct_compare_metrics arg:denylisted_ops arg:denylisted_nodes arg:fully_quantize arguments arg arg arg arg arg arg arg Assign Assign Assign Assign For If Compare Call Call If Compare Call Call Call Raise Call Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "trunc_to_int", + "source_code": "def trunc_to_int(self, x: T, dtype: torch.dtype) -> T:\n raise NotImplementedError", + "docstring": "Convert x to dtype with truncation semantics (similar to how the int constructor works in Python). In Inductor codegen, this just decays to trunc and then to_dtype, but this composite operation helps roundtrips for Sympy evaluation. dtype is taken as an explicit parameter because the desired output dtype is typically the index dtype, which may vary between int32 and int64 depending on if we've shown that all the indexing operations can be done in int32.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:trunc_to_int arg:self arg:x arg:dtype arguments arg arg arg Raise" + }, + { + "library": "scipy", + "name": "poles", + "source_code": "def poles(self):\n if self._poles is None:\n m = self.weights.size\n B = np.eye(m + 1, dtype=self.weights.dtype)\n B[0, 0] = 0\n E = np.zeros_like(B, dtype=np.result_type(self.weights, self._support_points))\n E[0, 1:] = self.weights\n E[1:, 0] = 1\n np.fill_diagonal(E[1:, 1:], self._support_points)\n pol = scipy.linalg.eigvals(E, B)\n self._poles = pol[np.isfinite(pol)]\n return self._poles", + "docstring": "Compute the poles of the rational approximation. Returns ------- poles : array Poles of the AAA approximation, repeated according to their multiplicity but not in any specific order.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_bary_rational.py", + "ast_data": "FunctionDef name:poles arg:self arguments arg If Compare Assign Assign Call Assign Assign Call Call Assign Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "add_event", + "source_code": "def add_event(self, name: str) -> None:\n logger.debug('[app] adding event: %r', name)\n self.events.add(name)", + "docstring": "Register an event called *name*. This is needed to be able to emit it. :param name: The name of the event", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:add_event arg:self arg:name arguments arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "dispatch_for_types", + "source_code": "def dispatch_for_types(op, *types):\n\n def decorator(func):\n _TypeBasedDispatcher(get_compatible_func(op, func), types).register(op)\n return func\n return decorator", + "docstring": "Decorator to declare that a Python function overrides an op for a type. The decorated function is used to override if any of the arguments or keyword arguments (including elements of lists or tuples) have one of the specified types. Example: Args: op: Python function: the operation that should be overridden. 
*types: The argument types for which this function should be used.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", + "ast_data": "FunctionDef name:dispatch_for_types arg:op arguments arg arg FunctionDef name:decorator arg:func arguments arg Call Call Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "def fit(self, X, y=None):\n self._reset()\n return self.partial_fit(X, y)", + "docstring": "Compute the maximum absolute value to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "dropout1d", + "source_code": "def dropout1d(input: Tensor, p: float=0.5, training: bool=True, inplace: bool=False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(dropout1d, (input,), input, p=p, training=training, inplace=inplace)\n if p < 0.0 or p > 1.0:\n raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}')\n inp_dim = input.dim()\n if inp_dim not in (2, 3):\n raise RuntimeError(f'dropout1d: Expected 2D or 3D input, but received a {inp_dim}D input. Note that dropout1d exists to provide channel-wise dropout on inputs with 1 spatial dimension, a channel dimension, and an optional batch dimension (i.e. 2D or 3D inputs).')\n is_batched = inp_dim == 3\n if not is_batched:\n input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)\n result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)\n if not is_batched:\n result = result.squeeze_(0) if inplace else result.squeeze(0)\n return result", + "docstring": "Randomly zero out entire channels (a channel is a 1D feature map). For example, the :math:-th channel of the :math:-th sample in the batched input is a 1D tensor :math: of the input tensor. Each channel will be zeroed out independently on every forward call with probability :attr: using samples from a Bernoulli distribution. See :class: for details. Args: p: probability of a channel to be zeroed. Default: 0.5 training: apply dropout if is ``", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:dropout1d arg:input arg:p arg:training arg:inplace arguments arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Raise Call Assign Call If Compare Raise Call Assign Compare If Assign Call Call Assign Call Call If Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "FixedShardsPartitioner", + "source_code": "@tf_export('distribute.experimental.partitioners.FixedShardsPartitioner', v1=[])\nclass FixedShardsPartitioner(Partitioner):\n\n def __init__(self, num_shards):\n self._num_shards = num_shards\n\n def __call__(self, shape, dtype, axis=0):\n del dtype\n result = [1] * len(shape)\n result[axis] = min(self._num_shards, shape.dims[axis].value)\n return result", + "docstring": "Partitioner that allocates a fixed number of shards. 
Examples: >>> # standalone usage: >>> partitioner = FixedShardsPartitioner(num_shards=2) >>> partitions = partitioner(tf.TensorShape([10, 3]), tf.float32) >>> [2, 1] >>> >>> # use in ParameterServerStrategy >>> # strategy = tf.distribute.experimental.ParameterServerStrategy( >>> # cluster_resolver=cluster_resolver, variable_partitioner=partitioner)", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py", + "ast_data": "ClassDef name:FixedShardsPartitioner FunctionDef name:__init__ arg:self arg:num_shards arguments arg arg Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:axis arguments arg arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_construct", + "source_code": "@staticmethod\ndef _construct(cpp_module, init_fn):\n script_module = RecursiveScriptModule(cpp_module)\n init_fn(script_module)\n RecursiveScriptModule._finalize_scriptmodule(script_module)\n return script_module", + "docstring": "Construct a RecursiveScriptModule that's ready for use. PyTorch code should use this to construct a RecursiveScriptModule instead of instead of calling directly, as it makes sure the object is properly finalized (and in the future, we may take control of how the RecursiveScriptModule instance is created). Args: cpp_module: The C++ Module that will hold the actual state of this RecursiveScriptModule instance. init_fn: Lambda that initializes the RecursiveScriptModule passed to it.", + "type": "method", + "file_path": "pytorch\\torch\\jit\\_script.py", + "ast_data": "FunctionDef name:_construct arg:cpp_module arg:init_fn arguments arg arg Assign Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "get_app_configs", + "source_code": "def get_app_configs(self):\n self.check_apps_ready()\n return self.app_configs.values()", + "docstring": "Import applications and return an iterable of app configs.", + "type": "method", + "file_path": "django\\django\\apps\\registry.py", + "ast_data": "FunctionDef name:get_app_configs arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "SliceTransformer", + "source_code": "class SliceTransformer(converter.Base):\n\n def _process_single_assignment(self, target, value):\n if not isinstance(target, gast.Subscript):\n return None\n s = target.slice\n if isinstance(s, (gast.Tuple, gast.Slice)):\n return None\n template = '\\n target = ag__.set_item(target, key, item)\\n '\n return templates.replace(template, target=target.value, key=target.slice, item=value)\n\n def visit_Assign(self, node):\n node = self.generic_visit(node)\n if len(node.targets) != 1:\n raise NotImplementedError('multiple assignment')\n replacement = self._process_single_assignment(node.targets[0], node.value)\n if replacement is not None:\n return replacement\n return node\n\n def visit_Subscript(self, node):\n node = self.generic_visit(node)\n s = node.slice\n if isinstance(s, (gast.Tuple, gast.Slice)):\n return node\n if not isinstance(node.ctx, gast.Load):\n return node\n dtype = self.get_definition_directive(node.value, directives.set_element_type, 'dtype', default=templates.replace_as_expression('None'))\n template = '\\n ag__.get_item(\\n target,\\n key,\\n opts=ag__.GetItemOpts(element_dtype=dtype))\\n '\n return templates.replace_as_expression(template, target=node.value, key=s, dtype=dtype)", + "docstring": "Converts slicing operations to their TF counterpart. 
Currently, relying on the default slice operator that Tensor uses is insufficient, because TensorArray and tensor lists use dedicated index read and write functions.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\slices.py", + "ast_data": "ClassDef name:SliceTransformer FunctionDef name:_process_single_assignment arg:self arg:target arg:value arguments arg arg arg If Call Return return:no Assign If Call Return return:no Assign Return return:yes Call FunctionDef name:visit_Assign arg:self arg:node arguments arg arg Assign Call If Compare Call Raise Call Assign Call If Compare Return return:yes Return return:yes FunctionDef name:visit_Subscript arg:self arg:node arguments arg arg Assign Call Assign If Call Return return:yes If Call Return return:yes Assign Call Call Assign Return return:yes Call" + }, + { + "library": "kornia", + "name": "shear_y", + "source_code": "def shear_y(min_mag: float, max_mag: float) -> OperationBase:\n if min_mag != -max_mag:\n raise ValueError(f'{ShearY.__name__} is a symmetric operation that `- min_mag == max_mag`. Got [{min_mag}, {max_mag}]')\n return ShearY(None, 1.0, magnitude_range=(0.0, max_mag), symmetric_megnitude=True)", + "docstring": "Return ShearY op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py", + "ast_data": "FunctionDef name:shear_y arg:min_mag arg:max_mag arguments arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "transform", + "source_code": "@classmethod\ndef transform(cls, input: Tensor, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n if extra_args is None:\n extra_args = {}\n if isinstance(module, (K.GeometricAugmentationBase2D,)):\n input = module.transform_masks(input, params=cls.get_instance_module_param(param), flags=module.flags, transform=module.transform_matrix, **extra_args)\n elif isinstance(module, (K.GeometricAugmentationBase3D,)):\n raise NotImplementedError('The support for 3d mask operations are not yet supported. You are welcome to file a PR in our repo.')\n elif isinstance(module, K.RandomTransplantation):\n input = module(input, params=cls.get_instance_module_param(param), data_keys=[DataKey.MASK], **extra_args)\n elif isinstance(module, _AugmentationBase):\n input = module.transform_masks(input, params=cls.get_instance_module_param(param), flags=module.flags, **extra_args)\n elif isinstance(module, K.ImageSequential) and (not module.is_intensity_only()):\n input = module.transform_masks(input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, K.container.ImageSequentialBase):\n input = module.transform_masks(input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, (K.auto.operations.OperationBase,)):\n input = MaskSequentialOps.transform(input, module=module.op, param=param, extra_args=extra_args)\n return input", + "docstring": "Apply a transformation with respect to the parameters. Args: input: the input tensor. module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. 
extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\ops.py", + "ast_data": "FunctionDef name:transform arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Compare Assign If Call Assign Call Call If Call Raise Call If Call Assign Call Call If Call Assign Call Call If BoolOp Call Call Assign Call Call If Call Assign Call Call If Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "each_context", + "source_code": "def each_context(self, request):\n script_name = request.META['SCRIPT_NAME']\n site_url = script_name if self.site_url == '/' and script_name else self.site_url\n return {'site_title': self.site_title, 'site_header': self.site_header, 'site_url': site_url, 'has_permission': self.has_permission(request), 'available_apps': self.get_app_list(request), 'is_popup': False, 'is_nav_sidebar_enabled': self.enable_nav_sidebar, 'log_entries': self.get_log_entries(request)}", + "docstring": "Return a dictionary of variables to put in the template context for *every* page in the admin site. For sites running on a subpath, use the SCRIPT_NAME value if site_url hasn't been customized.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\sites.py", + "ast_data": "FunctionDef name:each_context arg:self arg:request arguments arg arg Assign Assign BoolOp Compare Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "local_concrete_fields", + "source_code": "@cached_property\ndef local_concrete_fields(self):\n return make_immutable_fields_list('local_concrete_fields', (f for f in self.local_fields if f.concrete))", + "docstring": "Return a list of all concrete fields on the model. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list.", + "type": "method", + "file_path": "django\\django\\db\\models\\options.py", + "ast_data": "FunctionDef name:local_concrete_fields arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, text='', font_attr=None):\n self.text = text\n if font_attr:\n self.font_attr_segs = [(0, len(text), font_attr)]\n else:\n self.font_attr_segs = []", + "docstring": "Construct a RichLine with no rich attributes or a single attribute. Args: text: Raw text string font_attr: If specified, a single font attribute to be applied to the entire text. Extending this object via concatenation allows creation of text with varying attributes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:text arg:font_attr arguments arg arg arg Assign If Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "ragged_batch", + "source_code": "def ragged_batch(self, batch_size, drop_remainder=False, row_splits_dtype=dtypes.int64, name=None) -> 'DatasetV2':\n from tensorflow.python.data.ops import ragged_batch_op\n return ragged_batch_op._ragged_batch(self, batch_size, drop_remainder, row_splits_dtype, name)", + "docstring": "Combines consecutive elements of this dataset into s. Like , the components of the resulting element will have an additional outer dimension, which will be (or for the last element if does not divide the number of input elements evenly and is ). 
If your program depends on the batches having the same outer dimension, you should set the argument to to prevent the smaller batch from being produced. Unlike , the input elements to be batched may have different shapes: * If an input element is a whose static is fully defined, then it is batched as normal. * If an input element is a whose static contains one or more axes with unknown size (i.e., ), then the output will contain a that is ragged up to any of such dimensions. * If an input element is a or any other type, then it is batched as normal. Example: >>> dataset = tf.data.Dataset.range(6) >>> dataset = dataset.map(lambda x: tf.range(x)) >>> dataset.element_spec.shape TensorShape([None]) >>> dataset = dataset.ragged_batch(2) >>> for batch in dataset: ... print(batch) Args: batch_size: A scalar , representing the number of consecutive elements of this dataset to combine in a single batch. drop_remainder: (Optional.) A scalar , representing whether the last batch should be dropped in the case it has fewer than elements; the default behavior is not to drop the smaller batch. row_splits_dtype: The dtype that should be used for the of any new ragged tensors. Existing elements do not have their row_splits dtype changed. name: (Optional.) A string indicating a name for the operation. Returns: A new with the transformation applied as described above.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:ragged_batch arg:self arg:batch_size arg:drop_remainder arg:row_splits_dtype arg:name arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "violation", + "source_code": "def violation(self, x):\n with catch_warnings():\n filterwarnings('ignore', 'delta_grad', UserWarning)\n ev = self.fun.fun(np.asarray(x))\n excess_lb = np.maximum(self.bounds[0] - ev, 0)\n excess_ub = np.maximum(ev - self.bounds[1], 0)\n return excess_lb + excess_ub", + "docstring": "How much the constraint is exceeded by. Parameters ---------- x : array-like Vector of independent variables Returns ------- excess : array-like How much the constraint is exceeded by, for each of the constraints specified by .", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_constraints.py", + "ast_data": "FunctionDef name:violation arg:self arg:x arguments arg arg With Call Call Assign Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_unwrap_shared_qspec", + "source_code": "def _unwrap_shared_qspec(qspec: QuantizationSpecBase, edge_or_node_to_qspec: dict[EdgeOrNode, QuantizationSpecBase], shared_with_map: dict[EdgeOrNode, EdgeOrNode]) -> QuantizationSpecBase:\n if isinstance(qspec, SharedQuantizationSpec):\n sharing_with = qspec.edge_or_node\n root = _find_root_edge_or_node(sharing_with, shared_with_map)\n qspec = edge_or_node_to_qspec[root]\n return _unwrap_shared_qspec(qspec, edge_or_node_to_qspec, shared_with_map)\n return qspec", + "docstring": "Unwraps qspec to get the final root qspec (non SharedQuantizationSpec) if qspec is SharedQuantizationSpec (1). tries to find the root edge or node for the node that the qspec points to (2). 
recursively find the root qspec based on the qspec for the root node", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\prepare.py", + "ast_data": "FunctionDef name:_unwrap_shared_qspec arg:qspec arg:edge_or_node_to_qspec arg:shared_with_map arguments arg arg arg If Call Assign Assign Call Assign Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self):\n self._last_step_outputs = {}\n self._last_step_outputs_reduce_ops = {}\n self._non_tensor_outputs = {}", + "docstring": "Initialize an output context. Returns: A context object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign" + }, + { + "library": "cryptography", + "name": "key_size", + "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n pass", + "docstring": "The bit length of the prime modulus.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py", + "ast_data": "FunctionDef name:key_size arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "fuse_known_modules", + "source_code": "def fuse_known_modules(mod_list, is_qat, additional_fuser_method_mapping=None):\n types = tuple((type_before_parametrizations(m) for m in mod_list))\n fuser_method = get_fuser_method(types, additional_fuser_method_mapping)\n if fuser_method is None:\n raise NotImplementedError(f'Cannot fuse modules: {types}')\n new_mod: list[Optional[nn.Module]] = [None] * len(mod_list)\n fused = fuser_method(is_qat, *mod_list)\n for pre_hook_fn in mod_list[0]._forward_pre_hooks.values():\n fused.register_forward_pre_hook(pre_hook_fn)\n mod_list[0]._forward_pre_hooks.clear()\n for hook_fn in mod_list[-1]._forward_hooks.values():\n fused.register_forward_hook(hook_fn)\n mod_list[-1]._forward_hooks.clear()\n new_mod[0] = fused\n for i in range(1, len(mod_list)):\n identity = nn.Identity()\n identity.training = mod_list[0].training\n new_mod[i] = identity\n return new_mod", + "docstring": "Return a list of known fuse modules. Returns a list of modules that fuses the operations specified in the input module list. Fuses only the following sequence of modules: conv, bn conv, bn, relu conv, relu linear, bn linear, relu For these sequences, the first element in the output module list performs the fused operation. 
The rest of the elements are set to nn.Identity()", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fuse_modules.py", + "ast_data": "FunctionDef name:fuse_known_modules arg:mod_list arg:is_qat arg:additional_fuser_method_mapping arguments arg arg arg Assign Call Call Assign Call If Compare Raise Call Call Assign Call For Call Call Call For Call Call Call Assign For Call Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "batch_parallel", + "source_code": "@tf_export(v1=['tpu.batch_parallel'])\n@traceback_utils.filter_traceback\ndef batch_parallel(computation: Callable[..., Any], inputs: Optional[List[List[Optional[core_types.Tensor]]]]=None, num_shards: int=1, infeed_queue: Optional[tpu_feed.InfeedQueue]=None, device_assignment: Optional[device_assignment_lib.DeviceAssignment]=None, name: Optional[Text]=None, xla_options: Optional[XLAOptions]=None):\n return shard(computation, inputs, num_shards=num_shards, infeed_queue=infeed_queue, device_assignment=device_assignment, name=name, xla_options=xla_options)", + "docstring": "Shards along the batch dimension for parallel execution. Convenience wrapper around shard(). must be a list of Tensors or None (equivalent to an empty list). Each input is split into pieces along the 0-th dimension, and computation is applied to each shard in parallel. Tensors are broadcast to all shards if they are lexically captured by . e.g., x = tf.constant(7) def computation(): return x + 3 ... = shard(computation, ...) The outputs from all shards are concatenated back together along their 0-th dimension. Inputs and outputs of the computation must be at least rank-1 Tensors. Args: computation: A Python function that builds a computation to apply to each shard of the input. inputs: A list of input tensors or None (equivalent to an empty list). The 0-th dimension of each Tensor must have size divisible by . num_shards: The number of shards. infeed_queue: If not , the from which to append a tuple of arguments as inputs to . device_assignment: If not , a describing the mapping between logical cores in the computation with physical cores in the TPU topology. Uses a default device assignment if . The may be omitted if each shard of the computation uses only one core, and there is either only one shard, or the number of shards is equal to the number of cores in the TPU system. name: (Deprecated) Does nothing. xla_options: An instance of which indicates the options passed to XLA compiler. Use for default options. Returns: A list of output tensors. Raises: ValueError: If", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py", + "ast_data": "FunctionDef name:batch_parallel arg:computation arg:inputs arg:num_shards arg:infeed_queue arg:device_assignment arg:name arg:xla_options arguments arg arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "temp_data", + "source_code": "@property\ndef temp_data(self) -> _CurrentDocument:\n return self.current_document", + "docstring": "Returns the temporary data storage for the current document. 
Kept for backwards compatibility.", + "type": "method", + "file_path": "sphinx\\sphinx\\environment\\__init__.py", + "ast_data": "FunctionDef name:temp_data arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "freezing_passes", + "source_code": "def freezing_passes(gm: torch.fx.GraphModule, aot_example_inputs):\n from ..freezing import constant_fold\n lazy_init()\n binary_folding = counters['inductor']['binary_folding']\n fake_tensor_prop(gm, aot_example_inputs, True)\n torch._inductor.fx_passes.binary_folding.mark_mixed_dtype_allowed_computation_ops(gm)\n for _ in range(4):\n constant_fold(gm)\n fake_tensor_prop(gm, aot_example_inputs, True)\n binary_folding_pass.apply(gm.graph)\n if counters['inductor']['binary_folding'] == binary_folding:\n break\n binary_folding = counters['inductor']['binary_folding']\n torch._inductor.fx_passes.binary_folding.recover_original_precision_folded_computation_ops(gm)\n constant_fold(gm)\n fake_tensor_prop(gm, aot_example_inputs, True)\n for pattern in pass_patterns:\n pattern.apply(gm.graph)\n if torch._C._has_mkldnn and config.cpp.weight_prepack and config.layout_optimization:\n from .mkldnn_fusion import _eliminate_duplicate_packed_nodes\n _eliminate_duplicate_packed_nodes(gm)\n stable_topological_sort(gm.graph)\n gm.recompile()\n gm.graph.lint()", + "docstring": "Passes that are applied to the graph to freeze pass.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\freezing_patterns.py", + "ast_data": "FunctionDef name:freezing_passes arg:gm arg:aot_example_inputs arguments arg arg Call Assign Call Call For Call Call Call Call If Compare Assign Call Call Call For Call If BoolOp Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_link_flags", + "source_code": "@tf_export('sysconfig.get_link_flags')\ndef get_link_flags():\n is_mac = _platform.system() == 'Darwin'\n ver = _VERSION.split('.')[0]\n flags = []\n if not _MONOLITHIC_BUILD:\n flags.append('-L%s' % get_lib())\n if is_mac:\n flags.append('-ltensorflow_framework.%s' % ver)\n else:\n flags.append('-l:libtensorflow_framework.so.%s' % ver)\n return flags", + "docstring": "Returns the linker flags for linking with TensorFlow. The returned list of arguments can be passed to the linker for linking against TensorFlow. The result is platform dependent. For example, on a typical Linux system with Python 3.7 the following command prints >>> print(tf.sysconfig.get_link_flags()) Returns: A list of strings for the linker flags.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\sysconfig.py", + "ast_data": "FunctionDef name:get_link_flags arguments Assign Compare Call Assign Call Assign If Call Call If Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_height", + "source_code": "def set_height(self, height):\n self.height = height\n self.stale = True", + "docstring": "Set the height of the box. 
Parameters ---------- height : float", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:set_height arg:self arg:height arguments arg arg Assign Assign" + }, + { + "library": "tensorflow", + "name": "seed", + "source_code": "@property\ndef seed(self) -> Optional[int]:\n return self._seed", + "docstring": "The graph-level random seed of this graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:seed arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_recompute_transform", + "source_code": "def _recompute_transform(self):\n assert self._patch_type in ('arc', 'circle')\n center = (self.convert_xunits(self._center[0]), self.convert_yunits(self._center[1]))\n width = self.convert_xunits(self._width)\n height = self.convert_yunits(self._height)\n self._patch_transform = mtransforms.Affine2D().scale(width * 0.5, height * 0.5).translate(*center)", + "docstring": "Notes ----- This cannot be called until after this has been added to an Axes, otherwise unit conversion will fail. This makes it very important to call the accessor method and not directly access the transformation member variable.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\spines.py", + "ast_data": "FunctionDef name:_recompute_transform arg:self arguments arg Compare Assign Call Call Assign Call Assign Call Assign Call Call Call" + }, + { + "library": "django", + "name": "int64_output", + "source_code": "def int64_output(func, argtypes):\n func.argtypes = argtypes\n func.restype = c_int64\n return func", + "docstring": "Generate a ctypes function that returns a 64-bit integer value.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py", + "ast_data": "FunctionDef name:int64_output arg:func arg:argtypes arguments arg arg Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "insert_custom_op_guards", + "source_code": "def insert_custom_op_guards(gm: torch.fx.GraphModule, ops_to_guard: set[str]) -> None:\n for node in gm.graph.nodes:\n if node.op == 'call_function' and str(node.target) in ops_to_guard:\n with _set_node_metadata_hook(gm, functools.partial(_node_metadata_hook, stack_trace=node.meta.get('stack_trace'))), gm.graph.inserting_before(node):\n for arg in (*node.args, *node.kwargs.values()):\n if isinstance(arg, torch.fx.Node) and isinstance(arg.meta.get('val'), torch.Tensor):\n val = arg.meta['val']\n gm.graph.call_function(torch.ops.aten._assert_tensor_metadata.default, args=(arg,), kwargs={'dtype': val.dtype, 'device': val.device, 'layout': val.layout})\n gm.recompile()", + "docstring": "This is used by draft_export to insert guards in front of calls to custom operators which have a generated fake kernel.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\passes\\insert_custom_op_guards.py", + "ast_data": "FunctionDef name:insert_custom_op_guards arg:gm arg:ops_to_guard arguments arg arg For If BoolOp Compare Compare Call With Call Call Call Call For Call If BoolOp Call Call Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_initialize_single_worker", + "source_code": "def _initialize_single_worker(self, devices):\n self._devices = tuple((device_util.canonicalize(d) for d in devices))\n self._input_workers_devices = ((device_util.canonicalize('/device:CPU:0', devices[0]), devices),)\n self._host_input_device = 
numpy_dataset.SingleDevice(self._input_workers_devices[0][0])\n device_spec = tf_device.DeviceSpec.from_string(self._input_workers_devices[0][0])\n if device_spec.job is not None and device_spec.job != 'localhost':\n self._default_device = '/job:%s/replica:%d/task:%d' % (device_spec.job, device_spec.replica, device_spec.task)\n logging.info('Using MirroredStrategy with devices %r', devices)", + "docstring": "Initializes the object for single-worker training.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py", + "ast_data": "FunctionDef name:_initialize_single_worker arg:self arg:devices arguments arg arg Assign Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call" + }, + { + "library": "matplotlib", + "name": "single_shot", + "source_code": "@property\ndef single_shot(self):\n return self._single", + "docstring": "Whether this timer should stop after a single run.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:single_shot arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_parse_args", + "source_code": "def _parse_args(self, *args, **kwargs):\n if len(args) == 1:\n self._start = self._build_tensor(0, 'start')\n self._stop = self._build_tensor(args[0], 'stop')\n self._step = self._build_tensor(1, 'step')\n elif len(args) == 2:\n self._start = self._build_tensor(args[0], 'start')\n self._stop = self._build_tensor(args[1], 'stop')\n self._step = self._build_tensor(1, 'step')\n elif len(args) == 3:\n self._start = self._build_tensor(args[0], 'start')\n self._stop = self._build_tensor(args[1], 'stop')\n self._step = self._build_tensor(args[2], 'step')\n else:\n raise ValueError(f'Invalid `args`. 
The length of `args` should be between 1 and 3 but was {len(args)}.')\n if 'output_type' in kwargs:\n self._output_type = kwargs['output_type']\n else:\n self._output_type = dtypes.int64\n self._name = kwargs['name'] if 'name' in kwargs else None", + "docstring": "Parses arguments according to the same rules as the builtin.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\range_op.py", + "ast_data": "FunctionDef name:_parse_args arg:self arguments arg arg arg If Compare Call Assign Call Assign Call Assign Call If Compare Call Assign Call Assign Call Assign Call If Compare Call Assign Call Assign Call Assign Call Raise Call Call If Compare Assign Assign Assign Compare" + }, + { + "library": "scipy", + "name": "_check_work_float", + "source_code": "def _check_work_float(value, dtype, int_dtype):\n if dtype == np.float32 or dtype == np.complex64:\n value = np.nextafter(value, np.inf, dtype=np.float32)\n value = int(value)\n if int_dtype.itemsize == 4:\n if value < 0 or value > _int32_max:\n raise ValueError('Too large work array required -- computation cannot be performed with standard 32-bit LAPACK.')\n elif int_dtype.itemsize == 8:\n if value < 0 or value > _int64_max:\n raise ValueError('Too large work array required -- computation cannot be performed with standard 64-bit LAPACK.')\n return value", + "docstring": "Convert LAPACK-returned work array size float to integer, carefully for single-precision types.", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\lapack.py", + "ast_data": "FunctionDef name:_check_work_float arg:value arg:dtype arg:int_dtype arguments arg arg arg If BoolOp Compare Compare Assign Call Assign Call If Compare If BoolOp Compare Compare Raise Call If Compare If BoolOp Compare Compare Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_state_dict_type", + "source_code": "@staticmethod\ndef get_state_dict_type(module: nn.Module) -> StateDictSettings:\n state_dict_settings: Optional[StateDictSettings] = None\n for submodule in FullyShardedDataParallel.fsdp_modules(module):\n if state_dict_settings is None:\n state_dict_settings = StateDictSettings(state_dict_type=submodule._state_dict_type, state_dict_config=submodule._state_dict_config, optim_state_dict_config=submodule._optim_state_dict_config)\n _set_optim_use_dtensor(submodule, state_dict_settings)\n else:\n submodule_settings = StateDictSettings(submodule._state_dict_type, submodule._state_dict_config, submodule._optim_state_dict_config)\n assert state_dict_settings == submodule_settings, f'All FSDP modules must have the same state dict settings.Got {submodule_settings} and {state_dict_settings}.'\n _set_optim_use_dtensor(submodule, submodule_settings)\n return state_dict_settings", + "docstring": "Get the state_dict_type and the corresponding configurations for the FSDP modules rooted at `` for different FSDP submodules differ.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:get_state_dict_type arg:module arguments arg For Call If Compare Assign Call Call Assign Call Compare Call Return return:yes" + }, + { + "library": "numpy", + "name": "__rsub__", + "source_code": "def __rsub__(self, other):\n return subtract(other, self)", + "docstring": "Subtract self from other, and return a new masked array.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__rsub__ arg:self arg:other arguments arg arg Return return:yes 
Call" + }, + { + "library": "django", + "name": "from_bbox", + "source_code": "@classmethod\ndef from_bbox(cls, bbox):\n x0, y0, x1, y1 = bbox\n for z in bbox:\n if not isinstance(z, (float, int)):\n return GEOSGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))\n return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))", + "docstring": "Construct a Polygon from a bounding box (4-tuple).", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py", + "ast_data": "FunctionDef name:from_bbox arg:cls arg:bbox arguments arg arg Assign For If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_signature_def_fn", + "source_code": "@abc.abstractmethod\ndef _get_signature_def_fn(self):\n pass", + "docstring": "Returns a function that produces a SignatureDef given desired outputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py", + "ast_data": "FunctionDef name:_get_signature_def_fn arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "visit_Import", + "source_code": "def visit_Import(self, node):\n for import_alias in node.names:\n full_import = (import_alias.name, import_alias.asname)\n detection = self._api_analysis_spec.imports_to_detect.get(full_import, None)\n if detection:\n self.add_result(detection)\n self.add_log(detection.log_level, node.lineno, node.col_offset, detection.log_message)\n self.generic_visit(node)", + "docstring": "Handle visiting an import node in the AST. Args: node: Current Node", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", + "ast_data": "FunctionDef name:visit_Import arg:self arg:node arguments arg arg For Assign Assign Call If Call Call Call" + }, + { + "library": "scipy", + "name": "gradient_and_jacobian", + "source_code": "def gradient_and_jacobian(self, z):\n x = self.get_variables(z)\n s = self.get_slack(z)\n g = self.grad(x)\n J_eq, J_ineq = self.jac(x)\n return (self._compute_gradient(g), self._compute_jacobian(J_eq, J_ineq, s))", + "docstring": "Returns scaled gradient. 
Return scaled gradient: gradient = [ grad(x) ] [ -barrier_parameter*ones(n_ineq) ] and scaled Jacobian matrix: jacobian = [ jac_eq(x) 0 ] [ jac_ineq(x) S ] Both of them scaled by the previously defined scaling factor.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py", + "ast_data": "FunctionDef name:gradient_and_jacobian arg:self arg:z arguments arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_as_index", + "source_code": "def _as_index(idx, need_scalar=True):\n if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):\n return (idx, True)\n data = asarray(idx)\n if data.dtype == dtypes.bool:\n if data.shape.ndims != 1:\n raise NotImplementedError('Need rank 1 for bool index %s' % idx)\n data = array_ops.where_v2(data)\n data = array_ops.reshape(data, [-1])\n if need_scalar and data.shape.rank not in (None, 0):\n raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))\n np_dtype = data.dtype.as_numpy_dtype\n if not np.issubdtype(np_dtype, np.integer):\n raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))\n if data.dtype not in (dtypes.int64, dtypes.int32):\n promoted_dtype = np.promote_types(np.int32, np_dtype)\n if promoted_dtype == np.int32:\n data = math_ops.cast(data, dtypes.int32)\n elif promoted_dtype == np.int64:\n data = math_ops.cast(data, dtypes.int64)\n else:\n raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))\n return (data, data.shape.rank == 0)", + "docstring": "Helper function to parse idx as an index. Args: idx: index need_scalar: If idx needs to be a scalar value. Returns: A pair, (indx, bool). First one is the parsed index and can be a tensor, or scalar integer / Dimension. Second one is True if rank is known to be 0. 
Raises: IndexError: For incorrect indices.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py", + "ast_data": "FunctionDef name:_as_index arg:idx arg:need_scalar arguments arg arg If Call Return return:yes Assign Call If Compare If Compare Raise Call Assign Call Assign Call If BoolOp Compare Raise Call Call Assign If Call Raise Call Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Call Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "decorator", + "source_code": "def decorator(dispatch_target):\n if not callable(dispatch_target):\n raise TypeError(f'Expected dispatch_target to be callable; got {dispatch_target!r}')\n dispatch_target = _add_name_scope_wrapper(dispatch_target, api_signature)\n _check_signature(api_signature, dispatch_target)\n for signature_checker in signature_checkers:\n dispatcher.Register(signature_checker, dispatch_target)\n _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].extend(signatures)\n if not signature_checkers:\n signature = _signature_from_annotations(dispatch_target)\n checker = _make_signature_checker(api_signature, signature)\n dispatcher.Register(checker, dispatch_target)\n _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].append(signature)\n return dispatch_target", + "docstring": "Decorator that registers the given dispatch target.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", + "ast_data": "FunctionDef name:decorator arg:dispatch_target arguments arg If Call Raise Call Assign Call Call For Call Call If Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "do_encode", + "source_code": "def do_encode(self, tensor_value, encode_fn):\n del encode_fn\n encoded_tensor = struct_pb2.StructuredValue()\n if isinstance(tensor_value, ops.EagerTensor):\n encoded_tensor.tensor_value.CopyFrom(tensor_util.make_tensor_proto(tensor_value.numpy()))\n elif tensor_value.op.type == 'Const':\n encoded_tensor.tensor_value.CopyFrom(tensor_value.op.get_attr('value'))\n else:\n raise nested_structure_coder.NotEncodableError(f'No encoder for object {str(tensor_value)} of type {type(tensor_value)}.')\n return encoded_tensor", + "docstring": "Returns an encoded for the given .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py", + "ast_data": "FunctionDef name:do_encode arg:self arg:tensor_value arg:encode_fn arguments arg arg arg Assign Call If Call Call Call Call If Compare Call Call Raise Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "replace_set_grad_with_hop_pass", + "source_code": "def replace_set_grad_with_hop_pass(gm: torch.fx.GraphModule, graph_signature: Optional[ExportGraphSignature]) -> tuple[torch.fx.GraphModule, Optional[ExportGraphSignature]]:\n return _replace_with_hop_pass_helper(gm, graph_signature, _sequential_split_and_maybe_inline_subgraphs)", + "docstring": "Split gm into sub-graph-modules using , and then recursively call itself on each of the submodules.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\passes\\replace_set_grad_with_hop_pass.py", + "ast_data": "FunctionDef name:replace_set_grad_with_hop_pass arg:gm arg:graph_signature arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self):\n raise NotImplementedError('must be implemented in descendants')", + "docstring": 
"Perform one step of this training algorithm.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\step_fn.py", + "ast_data": "FunctionDef name:__call__ arg:self arguments arg Raise Call" + }, + { + "library": "django", + "name": "prepare_lookup_value", + "source_code": "def prepare_lookup_value(key, value, separator=','):\n if isinstance(value, list):\n return [prepare_lookup_value(key, v, separator=separator) for v in value]\n if key.endswith('__in'):\n value = value.split(separator)\n elif key.endswith('__isnull'):\n value = value.lower() not in ('', 'false', '0')\n return value", + "docstring": "Return a lookup value prepared to be used in queryset filtering.", + "type": "function", + "file_path": "django\\django\\contrib\\admin\\utils.py", + "ast_data": "FunctionDef name:prepare_lookup_value arg:key arg:value arg:separator arguments arg arg arg If Call Return return:yes Call If Call Assign Call If Call Assign Compare Call Return return:yes" + }, + { + "library": "authlib", + "name": "sign_rsa_sha1", + "source_code": "def sign_rsa_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return rsa_sha1_signature(base_string, client.rsa_key)", + "docstring": "Sign a RSASSA-PKCS #1 v1.5 base64 encoded signature.", + "type": "function", + "file_path": "authlib\\authlib\\oauth1\\rfc5849\\signature.py", + "ast_data": "FunctionDef name:sign_rsa_sha1 arg:client arg:request arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_log_if_debug_on", + "source_code": "def _log_if_debug_on(meth):\n\n @functools.wraps(meth)\n def wrapper(self, *args, **kwargs):\n if debugPS:\n self._pswriter.write(f'% {meth.__name__}\\n')\n return meth(self, *args, **kwargs)\n return wrapper", + "docstring": "Wrap method *meth* to emit a PS comment with the method name, if the global flag is set.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py", + "ast_data": "FunctionDef name:_log_if_debug_on arg:meth arguments arg FunctionDef name:wrapper arg:self arguments arg arg arg If Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "maybe_set_size", + "source_code": "def maybe_set_size(self, min_itemsize=None) -> None:\n if self.kind == 'string':\n if isinstance(min_itemsize, dict):\n min_itemsize = min_itemsize.get(self.name)\n if min_itemsize is not None and self.typ.itemsize < min_itemsize:\n self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)", + "docstring": "maybe set a string col itemsize: min_itemsize can be an integer or a dict with this columns name with an integer size", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:maybe_set_size arg:self arg:min_itemsize arguments arg arg If Compare If Call Assign Call If BoolOp Compare Compare Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_EigGrad", + "source_code": "@ops.RegisterGradient('Eig')\ndef _EigGrad(op: ops.Operation, grad_e, grad_v):\n e = op.outputs[0]\n compute_v = op.get_attr('compute_v')\n with ops.control_dependencies([grad_e, grad_v]):\n if compute_v:\n v = op.outputs[1]\n vt = _linalg.adjoint(v)\n f = array_ops.matrix_set_diag(_SafeReciprocal(array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)), array_ops.zeros_like(e))\n f = math_ops.conj(f)\n vgv = math_ops.matmul(vt, grad_v)\n mid = array_ops.matrix_diag(grad_e)\n diag_grad_part = 
array_ops.matrix_diag(array_ops.matrix_diag_part(math_ops.cast(math_ops.real(vgv), vgv.dtype)))\n mid += f * (vgv - math_ops.matmul(math_ops.matmul(vt, v), diag_grad_part))\n grad_a = linalg_ops.matrix_solve(vt, math_ops.matmul(mid, vt))\n else:\n _, v = linalg_ops.eig(op.inputs[0])\n vt = _linalg.adjoint(v)\n grad_a = linalg_ops.matrix_solve(vt, math_ops.matmul(array_ops.matrix_diag(grad_e), vt))\n return math_ops.cast(grad_a, op.inputs[0].dtype)", + "docstring": "Gradient for Eig. Based on eq. 4.77 from paper by Christoph Boeddeker et al. See also \"Computation of eigenvalue and eigenvector derivatives for a general complex-valued eigensystem\" by Nico van der Aa. As for now only distinct eigenvalue case is considered.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_EigGrad arg:op arg:grad_e arg:grad_v arguments arg arg arg Assign Assign Call With Call If Assign Assign Call Assign Call Call Call Call Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Assign Call Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "zeta", + "source_code": "def zeta(x, q=None, out=None):\n if q is None:\n return _ufuncs._riemann_zeta(x, out)\n else:\n return _ufuncs._zeta(x, q, out)", + "docstring": "Riemann or Hurwitz zeta function. Parameters ---------- x : array_like of float or complex. Input data q : array_like of float, optional Input data, must be real. Defaults to Riemann zeta. When is `xqxpolygamma` function: >>> m = 3 >>> x = 1.25 >>> polygamma(m, x) array(2.782144009188397) >>> (-1)**(m+1) * factorial(m) * zeta(m+1, x) 2.7821440091883969", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:zeta arg:x arg:q arg:out arguments arg arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "normal", + "source_code": "def normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None):\n with ops.name_scope(name, 'stateful_normal', [shape, mean, stddev]) as name:\n shape = _shape_tensor(shape)\n mean = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n stddev = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n rnd = self._standard_normal(shape, dtype=dtype)\n return math_ops.add(rnd * stddev, mean, name=name)", + "docstring": "Outputs random values from a normal distribution. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type . The mean of the normal distribution. stddev: A 0-D Tensor or Python value of type . The standard deviation of the normal distribution. dtype: The type of the output. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random normal values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:normal arg:self arg:shape arg:mean arg:stddev arg:dtype arg:name arguments arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_fusion_pair_priority", + "source_code": "def get_fusion_pair_priority(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> int:\n return 0", + "docstring": "Return an unsigned integer which represents the priority of this fusion pair. 
The smaller is with higher priority.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:get_fusion_pair_priority arg:self arg:node1 arg:node2 arguments arg arg arg Return return:yes" + }, + { + "library": "numpy", + "name": "size", + "source_code": "def size(obj, axis=None):\n return np.size(getdata(obj), axis)", + "docstring": "maskedarray version of the numpy function.", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:size arg:obj arg:axis arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_call_loss", + "source_code": "def _call_loss(inputs, ragged_output):\n r = loss_fn(*inputs)\n if ragged_output and (not isinstance(r, ragged_tensor.RaggedTensor)):\n r = ragged_tensor.RaggedTensor.from_tensor(r)\n elif not ragged_output and isinstance(r, ragged_tensor.RaggedTensor):\n r = r.to_tensor()\n return r", + "docstring": "Adapt the result to ragged or dense tensor according to the expected output type. This is done so that all the return values of the map operation have the same type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:_call_loss arg:inputs arg:ragged_output arguments arg arg Assign Call If BoolOp Call Assign Call If BoolOp Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "next_rendezvous", + "source_code": "@abstractmethod\ndef next_rendezvous(self) -> RendezvousInfo:\n pass", + "docstring": "Main entry-point into the rendezvous barrier. Blocks until the rendezvous is complete and the current process is included in the formed worker group, or a timeout occurs, or the rendezvous was marked closed. Returns: Instance of :py:class:. Raises: RendezvousClosedError: The rendezvous is closed. RendezvousConnectionError: The connection to the rendezvous backend has failed. RendezvousStateError: The rendezvous state is corrupt. 
RendezvousTimeoutError: The rendezvous did not complete on time.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", + "ast_data": "FunctionDef name:next_rendezvous arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "replace_target_nodes_with", + "source_code": "@compatibility(is_backward_compatible=False)\ndef replace_target_nodes_with(fx_module: GraphModule, old_op: str, old_target: Target, new_op: str, new_target: Target):\n new_graph = Graph()\n val_map: dict[Node, Node] = {}\n for node in fx_module.graph.nodes:\n if node.op == old_op and node.target == old_target:\n args = map_arg(node.args, lambda n: val_map[n])\n kwargs = map_arg(node.kwargs, lambda n: val_map[n])\n assert isinstance(args, tuple)\n assert isinstance(kwargs, dict)\n val_map[node] = new_graph.create_node(new_op, new_target, args, kwargs, node.name)\n else:\n val_map[node] = new_graph.node_copy(node, lambda n: val_map[n])\n fx_module.graph = new_graph", + "docstring": "Modifies all nodes in fx_module.graph.nodes which match the specified op code and target, and updates them to match the new op code and target", + "type": "function", + "file_path": "pytorch\\torch\\fx\\passes\\graph_manipulation.py", + "ast_data": "FunctionDef name:replace_target_nodes_with arg:fx_module arg:old_op arg:old_target arg:new_op arg:new_target arguments arg arg arg arg arg Assign Call For If BoolOp Compare Compare Assign Call arguments arg Assign Call arguments arg Call Call Assign Call Assign Call arguments arg Assign Call" + }, + { + "library": "matplotlib", + "name": "limit_range_for_scale", + "source_code": "def limit_range_for_scale(self, vmin, vmax, minpos):\n if not np.isfinite(minpos):\n minpos = 1e-07\n return (minpos if vmin <= 0 else vmin, 1 - minpos if vmax >= 1 else vmax)", + "docstring": "Limit the domain to values between 0 and 1 (excluded).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "FunctionDef name:limit_range_for_scale arg:self arg:vmin arg:vmax arg:minpos arguments arg arg arg arg If Call Assign Return return:yes Compare Compare" + }, + { + "library": "tensorflow", + "name": "get_resource", + "source_code": "def get_resource(self, feature_column, resource_name):\n if feature_column not in self._cols_to_resources_map or resource_name not in self._cols_to_resources_map[feature_column]:\n raise ValueError('Resource does not exist.')\n return self._cols_to_resources_map[feature_column][resource_name]", + "docstring": "Returns an already created resource. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A object this variable corresponds to. 
resource_name: Name of the resource.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_resource arg:self arg:feature_column arg:resource_name arguments arg arg arg If BoolOp Compare Compare Raise Call Return return:yes" + }, + { + "library": "sphinx", + "name": "getdoc", + "source_code": "def getdoc(obj: Any, attrgetter: _AttrGetter=safe_getattr, allow_inherited: bool=False, cls: Any=None, name: str | None=None) -> str | None:\n if cls and name and is_classmethod_like(obj, cls, name):\n for basecls in getmro(cls):\n meth = basecls.__dict__.get(name)\n if not meth:\n continue\n if hasattr(meth, '__func__') or is_classmethod_descriptor(meth):\n doc: str | None = getdoc(getattr(meth, '__func__', meth))\n if doc is not None or not allow_inherited:\n return doc\n doc = _getdoc_internal(obj)\n if ispartial(obj) and doc == obj.__class__.__doc__:\n return getdoc(obj.func)\n elif doc is None and allow_inherited:\n if cls and name:\n for basecls in getmro(cls):\n meth = safe_getattr(basecls, name, None)\n if meth is not None:\n doc = _getdoc_internal(meth)\n if doc is not None:\n break\n if doc is None:\n for basecls in getmro(cls):\n meth = safe_getattr(basecls, name, None)\n if meth is not None:\n doc = inspect.getdoc(meth)\n if doc is not None:\n break\n if doc is None:\n doc = inspect.getdoc(obj)\n return doc", + "docstring": "Get the docstring for the object. This tries to obtain the docstring for some kind of objects additionally: * partial functions * inherited docstring * inherited decorated methods", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:getdoc arg:obj arg:attrgetter arg:allow_inherited arg:cls arg:name arguments arg arg arg arg arg If BoolOp Call For Call Assign Call If If BoolOp Call Call Call Call If BoolOp Compare Return return:yes Assign Call If BoolOp Call Compare Return return:yes Call If BoolOp Compare If BoolOp For Call Assign Call If Compare Assign Call If Compare If Compare For Call Assign Call If Compare Assign Call If Compare If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "predict", + "source_code": "def predict(self, x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False):\n self._assert_built_as_v1()\n self._check_call_args('predict')\n func = self._select_training_loop(x)\n return func.predict(self, x=x, batch_size=batch_size, verbose=verbose, steps=steps, callbacks=callbacks, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing)", + "docstring": "Generates output predictions for the input samples. Computation is done in batches (see the arg.) Args: x: Input samples. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dataset. - A generator or instance. batch_size: Integer or . Number of samples per batch of computation. If unspecified, will default to 32. Do not specify the if your data is in the form of symbolic tensors, dataset, generators, or instances (since they generate batches). verbose: Verbosity mode, 0 or 1. steps: Total number of steps (batches of samples) before declaring the prediction round finished. Ignored with the default value of . If x is a dataset and is None, will run until the input dataset is exhausted. 
callbacks: List of instances. List of callbacks to apply during prediction. See . max_queue_size: Integer. Used for generator or input only. Maximum size for the generator queue. If unspecified, will default to 10. workers: Integer. Used for generator or input only. Maximum number of processes to spin up when using process-based threading. If unspecified, will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. Used for generator or input only. If , use process-based threading. If unspecified, will default to . Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. Returns: Numpy array(s) of predictions. Raises: ValueError: In case of mismatch between the provided input data and the model's expectations, or in case a stateful model receives a number of samples that is not a multiple of the batch size.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:predict arg:self arg:x arg:batch_size arg:verbose arg:steps arg:callbacks arg:max_queue_size arg:workers arg:use_multiprocessing arguments arg arg arg arg arg arg arg arg arg Call Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "maybe_enable_thunkify", + "source_code": "@contextmanager\ndef maybe_enable_thunkify() -> Generator[None, None, None]:\n proxy_mode = get_proxy_mode()\n if proxy_mode is not None:\n with _enable_thunkify(proxy_mode.tracer):\n yield\n else:\n yield", + "docstring": "Within this context manager, if you are doing make_fx tracing, we will thunkify all SymNode compute and avoid tracing it into the graph unless it is actually needed. You should prefer to avoid using this as much as possible, as lazy evaluation of SymNode tracing can lead to long chains of thunks which will stack overflow if you evaluate them. However, this is currently sometimes necessary as there are buggy parts of PT2 which will fail with \"s0 is not tracked with proxy\" error due to insufficient tracing of SymNode computation.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py", + "ast_data": "FunctionDef name:maybe_enable_thunkify arguments Assign Call If Compare With Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, iterator_resource, initializer, output_types, output_shapes, output_classes):\n self._iterator_resource = iterator_resource\n self._initializer = initializer\n if output_types is None or output_shapes is None or output_classes is None:\n raise ValueError(f'All of `output_types`, `output_shapes`, and `output_classes` must be specified to create an iterator. Got `output_types` = {output_types!r}, `output_shapes` = {output_shapes!r}, `output_classes` = {output_classes!r}.')\n self._element_spec = structure.convert_legacy_structure(output_types, output_shapes, output_classes)\n self._flat_tensor_shapes = structure.get_flat_tensor_shapes(self._element_spec)\n self._flat_tensor_types = structure.get_flat_tensor_types(self._element_spec)\n self._string_handle = gen_dataset_ops.iterator_to_string_handle(self._iterator_resource)\n self._get_next_call_count = 0\n ops.add_to_collection(GLOBAL_ITERATORS, self._iterator_resource)", + "docstring": "Creates a new iterator from the given iterator resource. 
Note: Most users will not call this initializer directly, and will instead use or . Args: iterator_resource: A scalar representing the iterator. initializer: A that should be run to initialize this iterator. output_types: A (nested) structure of objects corresponding to each component of an element of this iterator. output_shapes: A (nested) structure of objects corresponding to each component of an element of this iterator. output_classes: A (nested) structure of Python objects corresponding to each component of an element of this iterator. Raises: TypeError: If , , or is not specified.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:iterator_resource arg:initializer arg:output_types arg:output_shapes arg:output_classes arguments arg arg arg arg arg arg Assign Assign If BoolOp Compare Compare Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "_make_elementwise_binary_prim", + "source_code": "def _make_elementwise_binary_prim(name: str, *, type_promotion: ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND, **kwargs):\n return _make_prim(schema=f'{name}(Tensor self, Tensor other) -> Tensor', meta=partial(_prim_elementwise_meta, type_promotion=type_promotion), return_type=RETURN_TYPE.NEW, **kwargs)", + "docstring": "Creates an elementwise binary prim.", + "type": "function", + "file_path": "pytorch\\torch\\_prims\\__init__.py", + "ast_data": "FunctionDef name:_make_elementwise_binary_prim arg:name arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "execute_subgraph_from_prim_loop", + "source_code": "def execute_subgraph_from_prim_loop(subgraph, iter_idx, len_loop_local_arguments, *args, **kwargs):\n loop_local_args = args[:len_loop_local_arguments]\n global_args = args[len_loop_local_arguments:]\n return subgraph(*global_args, iter_idx, *loop_local_args, **kwargs)", + "docstring": "subgraph: GraphModule from sub-block. iter_idx: The index of interation. len_loop_local_arguments: The number of loop local arguments in args.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\converter.py", + "ast_data": "FunctionDef name:execute_subgraph_from_prim_loop arg:subgraph arg:iter_idx arg:len_loop_local_arguments arguments arg arg arg arg arg Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_parse_example_spec", + "source_code": "@abc.abstractproperty\ndef _parse_example_spec(self):\n pass", + "docstring": "Returns a parsing spec as dict. It is used for get_parsing_spec for . Returned spec is a dict from keys ('string') to , , and other supported objects. Please check documentation of for all supported spec objects. Let's say a Feature column depends on raw feature ('raw') and another (input_fc). 
One possible implementation of _parse_example_spec is as follows:", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_parse_example_spec arg:self arguments arg" + }, + { + "library": "django", + "name": "rollback", + "source_code": "@async_unsafe\ndef rollback(self):\n self.validate_thread_sharing()\n self.validate_no_atomic_block()\n self._rollback()\n self.errors_occurred = False\n self.needs_rollback = False\n self.run_on_commit = []", + "docstring": "Roll back a transaction and reset the dirty flag.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:rollback arg:self arguments arg Call Call Call Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "_get_ignored_params", + "source_code": "def _get_ignored_params(root_module: torch.nn.Module, ignored_modules: set[torch.nn.Module], ignored_parameters: Optional[Iterable[torch.nn.Parameter]]=None) -> set[torch.nn.Parameter]:\n all_ignored_params: set[torch.nn.Parameter] = set()\n params_in_ignored_modules = {p for m in ignored_modules for p in m.parameters() if not _is_fsdp_flattened(p)}\n all_ignored_params.update(params_in_ignored_modules)\n if ignored_parameters is not None:\n params_in_ignored_parameters = {p for p in ignored_parameters if not _is_fsdp_flattened(p)}\n all_ignored_params.update(params_in_ignored_parameters)\n for submodule in root_module.modules():\n optional_fsdp_state = _get_module_fsdp_state(submodule)\n if optional_fsdp_state is not None:\n assert hasattr(optional_fsdp_state, '_ignored_params')\n all_ignored_params.update(optional_fsdp_state._ignored_params)\n return all_ignored_params", + "docstring": "Return the parameters of the modules in `FlatParameter` s are excluded from the result.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py", + "ast_data": "FunctionDef name:_get_ignored_params arg:root_module arg:ignored_modules arg:ignored_parameters arguments arg arg arg Call Assign Call Call Call If Compare Assign Call Call For Call Assign Call If Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "benchmark_fused_nodes", + "source_code": "def benchmark_fused_nodes(self, nodes: Sequence[BaseSchedulerNode]) -> tuple[float, str]:\n raise NotImplementedError", + "docstring": "Benchmark fused list of nodes and return the execution time in milliseconds on randomly generated inputs.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:benchmark_fused_nodes arg:self arg:nodes arguments arg arg Raise" + }, + { + "library": "scipy", + "name": "gaussian_filter", + "source_code": "@_ni_docstrings.docfiller\ndef gaussian_filter(input, sigma, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0, *, radius=None, axes=None):\n input = np.asarray(input)\n output = _ni_support._get_output(output, input)\n axes = _ni_support._check_axes(axes, input.ndim)\n num_axes = len(axes)\n orders = _ni_support._normalize_sequence(order, num_axes)\n sigmas = _ni_support._normalize_sequence(sigma, num_axes)\n modes = _ni_support._normalize_sequence(mode, num_axes)\n radiuses = _ni_support._normalize_sequence(radius, num_axes)\n axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii], radiuses[ii]) for ii in range(num_axes) if sigmas[ii] > 1e-15]\n if len(axes) > 0:\n for axis, sigma, order, mode, radius in axes:\n gaussian_filter1d(input, sigma, axis, 
order, output, mode, cval, truncate, radius=radius)\n input = output\n else:\n output[...] = input[...]\n return output", + "docstring": "Multidimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar or sequence of scalars Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. order : int or sequence of ints, optional The order of the filter along each axis is given as a sequence of integers, or as a single number. An order of 0 corresponds to convolution with a Gaussian kernel. A positive order corresponds to convolution with that derivative of a Gaussian. %(output)s %(mode_multiple)s %(cval)s truncate : float, optional Truncate the filter at this many standard deviations. Default is 4.0. radius : None or int or sequence of ints, optional Radius of the Gaussian kernel. The radius are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. If specified, the size of the kernel along each axis will be `truncateinputinputaxessigmaordermoderadiusaxesaxesinputradius` will be used. Examples -------- >>> from scipy.ndimage import gaussian_filter >>> import numpy as np >>> a = np.arange(50, step=2).reshape((5,5)) >>> a array([[ 0, 2, 4, 6, 8], [10, 12, 14, 16, 18], [20, 22, 24, 26, 28], [30, 32, 34, 36, 38], [40, 42, 44, 46, 48]]) >>> gaussian_filter(a, sigma=1) array([[ 4, 6, 8, 9, 11], [10, 12, 14, 15, 17], [20, 22, 24, 25, 27], [29, 31, 33, 34, 36], [35, 37, 39, 40, 42]]) >>> from scipy import datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = gaussian_filter(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_filters.py", + "ast_data": "FunctionDef name:gaussian_filter arg:input arg:sigma arg:order arg:output arg:mode arg:cval arg:truncate arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Compare If Compare Call For Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_TensorListScatterGrad", + "source_code": "@ops.RegisterGradient('TensorListScatter')\n@ops.RegisterGradient('TensorListScatterV2')\ndef _TensorListScatterGrad(op: ops.Operation, dlist):\n tensor = op.inputs[0]\n indices = op.inputs[1]\n dtensor = gen_list_ops.tensor_list_gather(dlist, indices, element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]), element_dtype=tensor.dtype)\n if op.type == 'TensorListScatterV2':\n return (dtensor, None, None, None)\n else:\n return (dtensor, None, None)", + "docstring": "Gradient function for TensorListScatter.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py", + "ast_data": "FunctionDef name:_TensorListScatterGrad arg:op arg:dlist arguments arg arg Assign Assign Assign Call Call Call If Compare Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_adjust_index", + "source_code": "def _adjust_index(index, thresholds, offsets):\n t_index = array_ops.shape(array_ops.boolean_mask(thresholds, math_ops.less_equal(thresholds, index)))[0] - 1\n return index + array_ops.gather(offsets, t_index)", + 
"docstring": "Adjusts index to account for elements to be skipped.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\shuffle_ops.py", + "ast_data": "FunctionDef name:_adjust_index arg:index arg:thresholds arg:offsets arguments arg arg arg Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_count_total_params", + "source_code": "def _count_total_params(reader, count_exclude_pattern=''):\n var_to_shape_map = reader.get_variable_to_shape_map()\n if count_exclude_pattern:\n regex_pattern = re.compile(count_exclude_pattern)\n new_var_to_shape_map = {}\n exclude_num_tensors = 0\n exclude_num_params = 0\n for v in var_to_shape_map:\n if regex_pattern.search(v):\n exclude_num_tensors += 1\n exclude_num_params += np.prod(var_to_shape_map[v])\n else:\n new_var_to_shape_map[v] = var_to_shape_map[v]\n var_to_shape_map = new_var_to_shape_map\n print('# Excluding %d tensors (%d params) that match %s when counting.' % (exclude_num_tensors, exclude_num_params, count_exclude_pattern))\n var_sizes = [np.prod(var_to_shape_map[v]) for v in var_to_shape_map]\n return np.sum(var_sizes, dtype=int)", + "docstring": "Count total number of variables.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\inspect_checkpoint.py", + "ast_data": "FunctionDef name:_count_total_params arg:reader arg:count_exclude_pattern arguments arg arg Assign Call If Assign Call Assign Assign Assign For If Call Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "seaborn", + "name": "tick", + "source_code": "def tick(self, locator: Locator | None=None, *, at: Sequence[float] | None=None, upto: int | None=None, count: int | None=None, every: float | None=None, between: tuple[float, float] | None=None, minor: int | None=None) -> Continuous:\n if locator is not None and (not isinstance(locator, Locator)):\n raise TypeError(f'Tick locator must be an instance of {Locator!r}, not {type(locator)!r}.')\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if log_base or symlog_thresh:\n if count is not None and between is None:\n raise RuntimeError('`count` requires `between` with log transform.')\n if every is not None:\n raise RuntimeError('`every` not supported with log transform.')\n new = copy(self)\n new._tick_params = {'locator': locator, 'at': at, 'upto': upto, 'count': count, 'every': every, 'between': between, 'minor': minor}\n return new", + "docstring": "Configure the selection of ticks for the scale's axis or legend. Parameters ---------- locator : :class: subclass Pre-configured matplotlib locator; other parameters will not be used. at : sequence of floats Place ticks at these specific locations (in data units). upto : int Choose \"nice\" locations for ticks, but do not exceed this number. count : int Choose exactly this number of ticks, bounded by or axis limits. every : float Choose locations at this interval of separation (in data units). between : pair of floats Bound upper / lower ticks when using or . minor : int Number of unlabeled ticks to draw between labeled \"major\" ticks. 
Returns ------- scale Copy of self with new tick configuration.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\scales.py", + "ast_data": "FunctionDef name:tick arg:self arg:locator arguments arg arg arg arg arg arg arg arg If BoolOp Compare Call Raise Call Call Assign Call If BoolOp If BoolOp Compare Compare Raise Call If Compare Raise Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_tf_lazy_or", + "source_code": "def _tf_lazy_or(cond, b):\n return tf_cond.cond(cond, lambda: cond, b)", + "docstring": "Lazy-eval equivalent of \"or\" for Tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py", + "ast_data": "FunctionDef name:_tf_lazy_or arg:cond arg:b arguments arg arg Return return:yes Call arguments" + }, + { + "library": "cryptography", + "name": "__copy__", + "source_code": "@abc.abstractmethod\ndef __copy__(self) -> X25519PrivateKey:\n pass", + "docstring": "Returns a copy.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py", + "ast_data": "FunctionDef name:__copy__ arg:self arguments arg" + }, + { + "library": "django", + "name": "get_actions", + "source_code": "def get_actions(self, request):\n if self.actions is None or IS_POPUP_VAR in request.GET:\n return {}\n actions = self._filter_actions_by_permissions(request, self._get_base_actions())\n return {name: (func, name, desc) for func, name, desc in actions}", + "docstring": "Return a dictionary mapping the names of all actions for this ModelAdmin to a tuple of (callable, name, description) for each action.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_actions arg:self arg:request arguments arg arg If BoolOp Compare Compare Return return:no Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "children", + "source_code": "def children(self) -> Iterator['Module']:\n for _name, module in self.named_children():\n yield module", + "docstring": "Return an iterator over immediate children modules. 
Yields: Module: a child module", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:children arg:self arguments arg For Call" + }, + { + "library": "tensorflow", + "name": "device_name_to_device_path", + "source_code": "def device_name_to_device_path(device_name):\n device_name_items = compat.as_text(device_name).split('/')\n device_name_items = [item.replace(':', '_') for item in device_name_items]\n return METADATA_FILE_PREFIX + DEVICE_TAG + ','.join(device_name_items)", + "docstring": "Convert device name to device path.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:device_name_to_device_path arg:device_name arguments arg Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_module_wrapper", + "source_code": "def _get_module_wrapper(module: str, output_dir: str, output_package: str, api_version: int, symbols_by_module: Mapping[str, set[_Entrypoint]], use_lazy_loading: bool) -> str:\n if api_version != 1 and (not use_lazy_loading):\n return ''\n deprecated = 'False'\n has_lite = 'False'\n public_apis_name = 'None'\n if api_version == 1 and (not output_dir.strip('/').endswith('compat/v1')):\n deprecated = 'True'\n if 'lite' in symbols_by_module and use_lazy_loading:\n has_lite = 'True'\n if use_lazy_loading:\n public_apis_name = '_PUBLIC_APIS'\n return _DEPRECATION_FOOTER % (module.removeprefix(output_package).strip('.'), public_apis_name, deprecated, has_lite)", + "docstring": "Returns the module wrapper for the given module.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\generator\\generator.py", + "ast_data": "FunctionDef name:_get_module_wrapper arg:module arg:output_dir arg:output_package arg:api_version arg:symbols_by_module arg:use_lazy_loading arguments arg arg arg arg arg arg If BoolOp Compare Return return:yes Assign Assign Assign If BoolOp Compare Call Call Assign If BoolOp Compare Assign If Assign Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "slice_indexer", + "source_code": "def slice_indexer(self, start: Hashable | None=None, end: Hashable | None=None, step: int | None=None) -> slice:\n start_slice, end_slice = self.slice_locs(start, end, step=step)\n if not is_scalar(start_slice):\n raise AssertionError('Start slice bound is non-scalar')\n if not is_scalar(end_slice):\n raise AssertionError('End slice bound is non-scalar')\n return slice(start_slice, end_slice, step)", + "docstring": "Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. step : int, default None If None, defaults to 1. Returns ------- slice A slice object. Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. See Also -------- Index.slice_locs : Computes slice locations for input labels. Index.get_slice_bound : Retrieves slice bound that corresponds to given label. Notes ----- This function assumes that the data is sorted, so use at your own peril. Examples -------- This is a method on all index types. 
For example you can do: >>> idx = pd.Index(list(\"abcd\")) >>> idx.slice_indexer(start=\"b\", end=\"c\") slice(1, 3, None) >>> idx = pd.MultiIndex.from_arrays([list(\"abcd\"), list(\"efgh\")]) >>> idx.slice_indexer(start=\"b\", end=(\"c\", \"g\")) slice(1, 3, None)", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:slice_indexer arg:self arg:start arg:end arg:step arguments arg arg arg arg Assign Call If Call Raise Call If Call Raise Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "ifftn", + "source_code": "@array_function_dispatch(_fftn_dispatcher)\ndef ifftn(a, s=None, axes=None, norm=None, out=None):\n return _raw_fftnd(a, s, axes, ifft, norm, out=out)", + "docstring": "Compute the N-dimensional inverse discrete Fourier Transform. This function computes the inverse of the N-dimensional discrete Fourier Transform over any number of axes in an M-dimensional array by means of the Fast Fourier Transform (FFT). In other words, `numpy.fftifftfftnsaxesifftsaxesssaxessaxesnumpy.fftaxessasaxesaxesaifftnfftshiftnumpy.fftifftifftn` is called. Examples -------- >>> import numpy as np >>> a = np.eye(4) >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt >>> n = np.zeros((200,200), dtype=complex) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = np.fft.ifftn(n).real >>> plt.imshow(im) >>> plt.show()", + "type": "function", + "file_path": "numpy\\numpy\\fft\\_pocketfft.py", + "ast_data": "FunctionDef name:ifftn arg:a arg:s arg:axes arg:norm arg:out arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_all_partitions_concatenated", + "source_code": "def _all_partitions_concatenated(ns):\n\n def all_partitions(z, n):\n for c in combinations(z, n):\n x0 = set(c)\n x1 = z - x0\n yield [x0, x1]\n\n def all_partitions_n(z, ns):\n if len(ns) == 0:\n yield [z]\n return\n for c in all_partitions(z, ns[0]):\n for d in all_partitions_n(c[1], ns[1:]):\n yield (c[0:1] + d)\n z = set(range(np.sum(ns)))\n for partitioning in all_partitions_n(z, ns[:]):\n x = np.concatenate([list(partition) for partition in partitioning]).astype(int)\n yield x", + "docstring": "Generate all partitions of indices of groups of given sizes, concatenated is an iterable of ints.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_resampling.py", + "ast_data": "FunctionDef name:_all_partitions_concatenated arg:ns arguments arg FunctionDef name:all_partitions arg:z arg:n arguments arg arg For Call Assign Call Assign FunctionDef name:all_partitions_n arg:z arg:ns arguments arg arg If Compare Call Return return:no For Call For Call Assign Call Call Call For Call Assign Call Call Call" + }, + { + "library": "kornia", + "name": "exp", + "source_code": "@staticmethod\ndef exp(v: Tensor) -> So3:\n theta = batched_dot_product(v, v).sqrt()[..., None]\n theta_nonzeros = theta != 0.0\n theta_half = 0.5 * theta\n w = where(theta_nonzeros, theta_half.cos(), tensor(1.0, device=v.device, dtype=v.dtype))\n b = where(theta_nonzeros, theta_half.sin() / theta, tensor(0.0, device=v.device, dtype=v.dtype))\n xyz = b * v\n return So3(Quaternion(concatenate((w, xyz), -1)))", + "docstring": "Convert elements of lie algebra to elements of lie 
group. See more: Args: v: vector of shape :math:. Example: >>> v = torch.zeros((2, 3)) >>> s = So3.exp(v) >>> s Parameter containing: tensor([[1., 0., 0., 0.], [1., 0., 0., 0.]], requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", + "ast_data": "FunctionDef name:exp arg:v arguments arg Assign Call Call Assign Compare Assign Assign Call Call Call Assign Call Call Call Assign Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "datetimes", + "source_code": "def datetimes(self, field_name, kind, order='ASC', tzinfo=None):\n if kind not in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'):\n raise ValueError(\"'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'.\")\n if order not in ('ASC', 'DESC'):\n raise ValueError(\"'order' must be either 'ASC' or 'DESC'.\")\n if settings.USE_TZ:\n if tzinfo is None:\n tzinfo = timezone.get_current_timezone()\n else:\n tzinfo = None\n return self.annotate(datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo), plain_field=F(field_name)).values_list('datetimefield', flat=True).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')", + "docstring": "Return a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:datetimes arg:self arg:field_name arg:kind arg:order arg:tzinfo arguments arg arg arg arg arg If Compare Raise Call If Compare Raise Call If If Compare Assign Call Assign Return return:yes Call Call Call Call Call Call Call Call Compare" + }, + { + "library": "tensorflow", + "name": "device", + "source_code": "@property\ndef device(self):\n return self._variable.device", + "docstring": "The device of this variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "line_search_wolfe1", + "source_code": "def line_search_wolfe1(f, fprime, xk, pk, gfk=None, old_fval=None, old_old_fval=None, args=(), c1=0.0001, c2=0.9, amax=50, amin=1e-08, xtol=1e-14):\n if gfk is None:\n gfk = fprime(xk, *args)\n gval = [gfk]\n gc = [0]\n fc = [0]\n\n def phi(s):\n fc[0] += 1\n return f(xk + s * pk, *args)\n\n def derphi(s):\n gval[0] = fprime(xk + s * pk, *args)\n gc[0] += 1\n return np.dot(gval[0], pk)\n derphi0 = np.dot(gfk, pk)\n stp, fval, old_fval = scalar_search_wolfe1(phi, derphi, old_fval, old_old_fval, derphi0, c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)\n return (stp, fc[0], gc[0], fval, old_fval, gval[0])", + "docstring": "As but do a line search to direction Parameters ---------- f : callable Function fprime : callable Gradient of xk : array_like Current point pk : array_like Search direction gfk : array_like, optional Gradient of at point old_fval : float, optional Value of at point old_old_fval : float, optional Value of at point preceding The rest of the parameters are the same as for . 
Returns ------- stp, f_count, g_count, fval, old_fval As in gval : array Gradient of at the final point Notes ----- Parameters and must satisfy ``.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_linesearch.py", + "ast_data": "FunctionDef name:line_search_wolfe1 arg:f arg:fprime arg:xk arg:pk arg:gfk arg:old_fval arg:old_old_fval arg:args arg:c1 arg:c2 arg:amax arg:amin arg:xtol arguments arg arg arg arg arg arg arg arg arg arg arg arg arg If Compare Assign Call Assign Assign Assign FunctionDef name:phi arg:s arguments arg Return return:yes Call FunctionDef name:derphi arg:s arguments arg Assign Call Return return:yes Call Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, sizes, **kwargs):\n super().__init__(**kwargs)\n self.set_sizes(sizes)\n self.set_transform(transforms.IdentityTransform())\n self._paths = [mpath.Path.unit_circle()]", + "docstring": "Parameters ---------- sizes : float or array-like The area of each circle in points^2. **kwargs Forwarded to .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:sizes arguments arg arg arg Call Call Call Call Call Assign Call" + }, + { + "library": "matplotlib", + "name": "transform_point", + "source_code": "def transform_point(self, point):\n if len(point) != self.input_dims:\n raise ValueError(\"The length of 'point' must be 'self.input_dims'\")\n return self.transform(point)", + "docstring": "Return a transformed point. This function is only kept for backcompatibility; the more general method is capable of transforming both a list of points and a single point. The point is given as a sequence of length :attr:. 
The transformed point is returned as a sequence of length :attr:.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:transform_point arg:self arg:point arguments arg arg If Compare Call Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "evaluate_conditional_with_constraints", + "source_code": "def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None):\n transformed_positive, transformed_negative = transform_all_constraints_trace_time(tracer_root, graph, node, counter)\n s = z3.Solver()\n s.add(transformed_positive)\n if user_constraints is not None:\n s.add(user_constraints)\n condition = s.check()\n s = z3.Solver()\n s.add(transformed_negative)\n if user_constraints is not None:\n s.add(user_constraints)\n negation = s.check()\n return (condition, negation)", + "docstring": "Given an IR and a node representing a conditional, evaluate the conditional and its negation Args: tracer_root: Tracer root for module instances node: The node to be evaluated Returns: the results of evaluating the condition and the negation with the rest of the constraints", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py", + "ast_data": "FunctionDef name:evaluate_conditional_with_constraints arg:tracer_root arg:graph arg:node arg:counter arg:user_constraints arguments arg arg arg arg arg Assign Call Assign Call Call If Compare Call Assign Call Assign Call Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "prepare_aot", + "source_code": "def prepare_aot(aot: list[str], srcs_dir: str) -> None:\n for file in aot:\n if 'external/local_tsl/' in file:\n copy_file(file, srcs_dir, 'external/local_tsl/')\n elif 'external/local_xla/' in file:\n copy_file(file, srcs_dir, 'external/local_xla/')\n else:\n copy_file(file, srcs_dir)\n shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/xla_build/CMakeLists.txt'), os.path.join(srcs_dir, 'CMakeLists.txt'))", + "docstring": "Rearrange xla_aot files in the target directory. Args: aot: a list of paths to files that should be in xla_aot directory. srcs_dir: target directory where files are copied to.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py", + "ast_data": "FunctionDef name:prepare_aot arg:aot arg:srcs_dir arguments arg arg For If Compare Call If Compare Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "__call__", + "source_code": "def __call__(self, x):\n with np.errstate(invalid='ignore'):\n return umath.less(umath.absolute(umath.cos(x)), self.eps)", + "docstring": "Executes the call behavior.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_global_batch_size", + "source_code": "@property\ndef _global_batch_size(self):\n return True", + "docstring": "and use global batch size. assumes per-replica batching. 
Returns: Boolean.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy.py", + "ast_data": "FunctionDef name:_global_batch_size arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "add_op_with_blocks", + "source_code": "def add_op_with_blocks(graph_context: GraphContext, opname: str, *inputs: _C.Value, outputs: int=1, n_blocks: int=1, **attributes) -> tuple[Any, tuple[GraphContext, ...], _C.Node]:\n output_values = graph_context.op(opname, *inputs, outputs=outputs, **attributes)\n if isinstance(output_values, Sequence):\n node = output_values[0].node()\n else:\n node = output_values.node()\n new_contexts = []\n for _ in range(n_blocks):\n new_block = node.addBlock()\n new_context = dataclasses.replace(graph_context, block=new_block)\n new_contexts.append(new_context)\n return (output_values, tuple(new_contexts), node)", + "docstring": "Creates an ONNX operator \"opname\", taking inputs and attributes. Args: graph_context: The context for the current graph. opname: The ONNX operator name, e.g., or , or an operator qualified with a namespace, e.g., . inputs: The inputs to the operator. outputs: The number of outputs this operator returns. By default an operator is assumed to return a single output. If is greater than one, this functions returns a tuple of output , representing each output of the ONNX operator in order. n_blocks: The number of sub-blocks to create in the node. attributes: The attributes of the ONNX operator. Returns: A tuple of (output_values, new_contexts, node) where: output_values: One or more output value of this operator (see the keyword argument for multi-return nodes). new_contexts: A tuple of new graph contexts for each sub-block. node: The node representing the operator.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py", + "ast_data": "FunctionDef name:add_op_with_blocks arg:graph_context arg:opname arguments arg arg arg arg arg arg Assign Call If Call Assign Call Assign Call Assign For Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "BatchSampler", + "source_code": "class BatchSampler(Sampler[list[int]]):\n\n def __init__(self, sampler: Union[Sampler[int], Iterable[int]], batch_size: int, drop_last: bool) -> None:\n if not isinstance(batch_size, int) or isinstance(batch_size, bool) or batch_size <= 0:\n raise ValueError(f'batch_size should be a positive integer value, but got batch_size={batch_size}')\n if not isinstance(drop_last, bool):\n raise ValueError(f'drop_last should be a boolean value, but got drop_last={drop_last}')\n self.sampler = sampler\n self.batch_size = batch_size\n self.drop_last = drop_last\n\n def __iter__(self) -> Iterator[list[int]]:\n sampler_iter = iter(self.sampler)\n if self.drop_last:\n args = [sampler_iter] * self.batch_size\n for batch_droplast in zip(*args):\n yield [*batch_droplast]\n else:\n batch = [*itertools.islice(sampler_iter, self.batch_size)]\n while batch:\n yield batch\n batch = [*itertools.islice(sampler_iter, self.batch_size)]\n\n def __len__(self) -> int:\n if self.drop_last:\n return len(self.sampler) // self.batch_size\n else:\n return (len(self.sampler) + self.batch_size - 1) // self.batch_size", + "docstring": "Wraps another sampler to yield a mini-batch of indices. Args: sampler (Sampler or Iterable): Base sampler. Can be any iterable object batch_size (int): Size of mini-batch. 
drop_last (bool): If `` Example: >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True)) [[0, 1, 2], [3, 4, 5], [6, 7, 8]]", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\sampler.py", + "ast_data": "ClassDef name:BatchSampler FunctionDef name:__init__ arg:self arg:sampler arg:batch_size arg:drop_last arguments arg arg arg arg If BoolOp Call Call Compare Raise Call If Call Raise Call Assign Assign Assign FunctionDef name:__iter__ arg:self arguments arg Assign Call If Assign For Call Assign Call While Assign Call FunctionDef name:__len__ arg:self arguments arg If Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__get__", + "source_code": "def __get__(self, instance, owner):\n del owner\n if isinstance(instance, composite_tensor.CompositeTensor) and instance._type_spec is not None:\n return types_lib.MethodType(self, instance)\n if instance not in self._descriptor_cache:\n if instance is None:\n return self\n self._descriptor_cache[instance] = class_method_to_instance_method(self, instance)\n return self._descriptor_cache[instance]", + "docstring": "Makes it possible to decorate instance methods.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", + "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:owner arguments arg arg arg If BoolOp Call Compare Return return:yes Call If Compare If Compare Return return:yes Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "add_continue_node", + "source_code": "def add_continue_node(self, ast_node, section_id, guards):\n node = self._add_jump_node(ast_node, guards)\n self.continues[section_id].add(node)", + "docstring": "Grows the graph by adding a reentry node. This node causes control flow to go back to the loop section's entry. Args: ast_node: ast.AST section_id: Hashable, the node for which ast_node should be considered to be an exit node guards: Tuple[ast.AST, ...], the finally sections that guard ast_node", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:add_continue_node arg:self arg:ast_node arg:section_id arg:guards arguments arg arg arg arg Assign Call Call" + }, + { + "library": "scikit-learn", + "name": "_dist_wrapper", + "source_code": "def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):\n dist_matrix[:, slice_] = dist_func(*args, **kwargs)", + "docstring": "Write in-place to a slice of a distance matrix.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py", + "ast_data": "FunctionDef name:_dist_wrapper arg:dist_func arg:dist_matrix arg:slice_ arguments arg arg arg arg arg Assign Call" + }, + { + "library": "scikit-learn", + "name": "_color_brew", + "source_code": "def _color_brew(n):\n color_list = []\n s, v = (0.75, 0.9)\n c = s * v\n m = v - c\n for h in np.arange(25, 385, 360.0 / n).astype(int):\n h_bar = h / 60.0\n x = c * (1 - abs(h_bar % 2 - 1))\n rgb = [(c, x, 0), (x, c, 0), (0, c, x), (0, x, c), (x, 0, c), (c, 0, x), (c, x, 0)]\n r, g, b = rgb[int(h_bar)]\n rgb = [int(255 * (r + m)), int(255 * (g + m)), int(255 * (b + m))]\n color_list.append(rgb)\n return color_list", + "docstring": "Generate n colors with equally spaced hues. Parameters ---------- n : int The number of colors required. 
Returns ------- color_list : list, length n List of n tuples of form (R, G, B) being the components of each color.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\tree\\_export.py", + "ast_data": "FunctionDef name:_color_brew arg:n arguments arg Assign Assign Assign Assign For Call Call Assign Assign Call Assign Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_get_window_indexer", + "source_code": "def _get_window_indexer(self) -> BaseIndexer:\n return ExponentialMovingWindowIndexer()", + "docstring": "Return an indexer class that will compute the window start and end bounds", + "type": "method", + "file_path": "pandas\\pandas\\core\\window\\ewm.py", + "ast_data": "FunctionDef name:_get_window_indexer arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "prod", + "source_code": "def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n _mask = self._mask\n newmask = _check_mask_axis(_mask, axis, **kwargs)\n if out is None:\n result = self.filled(1).prod(axis, dtype=dtype, **kwargs)\n rndim = getattr(result, 'ndim', 0)\n if rndim:\n result = result.view(type(self))\n result.__setmask__(newmask)\n elif newmask:\n result = masked\n return result\n result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)\n if isinstance(out, MaskedArray):\n outmask = getmask(out)\n if outmask is nomask:\n outmask = out._mask = make_mask_none(out.shape)\n outmask.flat = newmask\n return out", + "docstring": "Return the product of the array elements over the given axis. Masked elements are set to 1 internally for computation. Refer to for full documentation. Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. 
See Also -------- numpy.ndarray.prod : corresponding function for ndarrays numpy.prod : equivalent function", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:prod arg:self arg:axis arg:dtype arg:out arg:keepdims arguments arg arg arg arg arg Assign Compare Assign Assign Call If Compare Assign Call Call Assign Call If Assign Call Call Call If Assign Return return:yes Assign Call Call If Call Assign Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "django", + "name": "messages", + "source_code": "def messages(request):\n return {'messages': get_messages(request), 'DEFAULT_MESSAGE_LEVELS': DEFAULT_LEVELS}", + "docstring": "Return a lazy 'messages' context variable as well as 'DEFAULT_MESSAGE_LEVELS'.", + "type": "function", + "file_path": "django\\django\\contrib\\messages\\context_processors.py", + "ast_data": "FunctionDef name:messages arg:request arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, laf: torch.Tensor, img: torch.Tensor) -> torch.Tensor:\n KORNIA_CHECK_LAF(laf)\n KORNIA_CHECK_SHAPE(img, ['B', '1', 'H', 'W'])\n B, N = laf.shape[:2]\n PS: int = self.patch_size\n patches: torch.Tensor = extract_patches_from_pyramid(img, make_upright(laf), PS, True).view(-1, 1, PS, PS)\n xy = self.features(self._normalize_input(patches)).view(-1, 3)\n a1 = torch.cat([1.0 + xy[:, 0].reshape(-1, 1, 1), 0 * xy[:, 0].reshape(-1, 1, 1)], dim=2)\n a2 = torch.cat([xy[:, 1].reshape(-1, 1, 1), 1.0 + xy[:, 2].reshape(-1, 1, 1)], dim=2)\n new_laf_no_center = torch.cat([a1, a2], dim=1).reshape(B, N, 2, 2)\n new_laf = torch.cat([new_laf_no_center, laf[:, :, :, 2:3]], dim=3)\n scale_orig = get_laf_scale(laf)\n if self.preserve_orientation:\n ori_orig = get_laf_orientation(laf)\n ellipse_scale = get_laf_scale(new_laf)\n laf_out = scale_laf(make_upright(new_laf), scale_orig / ellipse_scale)\n if self.preserve_orientation:\n laf_out = set_laf_orientation(laf_out, ori_orig)\n return laf_out", + "docstring": "Run forward. Args: laf: :math: img: :math: Returns: LAF_out: :math:", + "type": "method", + "file_path": "kornia\\kornia\\feature\\affine_shape.py", + "ast_data": "FunctionDef name:forward arg:self arg:laf arg:img arguments arg arg arg Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Assign Call Assign Call If Assign Call Assign Call Assign Call Call If Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "get_table_type", + "source_code": "def get_table_type(self) -> str:\n if self.is_longtable():\n return 'longtable'\n elif self.has_verbatim:\n return 'tabular'\n elif self.colspec:\n return 'tabulary'\n elif self.has_problematic or (self.colwidths and 'colwidths-given' in self.classes):\n return 'tabular'\n else:\n return 'tabulary'", + "docstring": "Returns the LaTeX environment name for the table. 
The class currently supports: * longtable * tabular * tabulary", + "type": "method", + "file_path": "sphinx\\sphinx\\writers\\latex.py", + "ast_data": "FunctionDef name:get_table_type arg:self arguments arg If Call Return return:yes If Return return:yes If Return return:yes If BoolOp BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "local_devices_from_num_gpus", + "source_code": "def local_devices_from_num_gpus(num_gpus):\n return tuple(('/device:GPU:%d' % i for i in range(num_gpus))) or ('/device:CPU:0',)", + "docstring": "Returns device strings for local GPUs or CPU.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\device_util.py", + "ast_data": "FunctionDef name:local_devices_from_num_gpus arg:num_gpus arguments arg Return return:yes BoolOp Call Call" + }, + { + "library": "pytorch", + "name": "is_unsafe_leaf", + "source_code": "def is_unsafe_leaf(self, row, predicted_config, choice2time):\n return False", + "docstring": "Can be overridden by subclasses to define their own logic for deciding when a leaf is unsafe. Returns a sample that landed in the leaf, the choice predicted by the tree, and a dictionary that maps each choice to the execution time. One can for example decide to mark a leaf as unsafe if the predicted choice is 2x slower than the fastest choice. If a leaf is unsafe, the learned heuristic will always return 'unsure' if an input lands in that leaf.", + "type": "method", + "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", + "ast_data": "FunctionDef name:is_unsafe_leaf arg:self arg:row arg:predicted_config arg:choice2time arguments arg arg arg arg Return return:yes" + }, + { + "library": "scipy", + "name": "_skip_if_str_or_tuple", + "source_code": "def _skip_if_str_or_tuple(window):\n if isinstance(window, str) or isinstance(window, tuple) or callable(window):\n return None\n else:\n return window", + "docstring": "Handle being a str or a tuple or an array-like.", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_delegators.py", + "ast_data": "FunctionDef name:_skip_if_str_or_tuple arg:window arguments arg If BoolOp Call Call Call Return return:no Return return:yes" + }, + { + "library": "pytorch", + "name": "check_subgraphs_connected", + "source_code": "@compatibility(is_backward_compatible=False)\ndef check_subgraphs_connected(subgraph1: SourcePartition, subgraph2: SourcePartition) -> bool:\n for node in reversed(subgraph1.nodes):\n for user in node.users.keys():\n if user in subgraph2.nodes:\n return True\n return False", + "docstring": "Given two subgraphs A and B (in the form of a list of nodes), checks if A has nodes connecting to at least one node in B -- aka there exists a node in B that uses a node in A (not the other way around).", + "type": "function", + "file_path": "pytorch\\torch\\fx\\passes\\utils\\source_matcher_utils.py", + "ast_data": "FunctionDef name:check_subgraphs_connected arg:subgraph1 arg:subgraph2 arguments arg arg For Call For Call If Compare Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "stop_filter", + "source_code": "def stop_filter(self, filter_func):\n pass", + "docstring": "Switch back to the original renderer. The contents of the temporary renderer is processed with the *filter_func* and is drawn on the original renderer as an image. 
Currently only supported by the agg renderer.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:stop_filter arg:self arg:filter_func arguments arg arg" + }, + { + "library": "tensorflow", + "name": "merge", + "source_code": "def merge(inputs, name=None):\n if any((inp is None for inp in inputs)):\n raise ValueError('At least one of the merge inputs is None: %s' % inputs)\n with ops.name_scope(name, 'Merge', inputs) as name:\n inputs = [ops.internal_convert_to_tensor_or_composite(inp, as_ref=True) for inp in inputs]\n if all((isinstance(v, tensor_lib.Tensor) for v in inputs)):\n if all((v.dtype._is_ref_dtype for v in inputs)):\n return gen_control_flow_ops.ref_merge(inputs, name)\n else:\n return gen_control_flow_ops.merge(inputs, name)\n else:\n if all((isinstance(v, (indexed_slices.IndexedSlices, tensor_lib.Tensor)) for v in inputs)):\n inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)\n for v in inputs:\n if not isinstance(v, composite_tensor.CompositeTensor):\n raise TypeError('Type %s not supported' % type(v))\n for v in inputs[1:]:\n nest.assert_same_structure(inputs[0], v, expand_composites=True)\n flat_inputs = [nest.flatten(v, expand_composites=True) for v in inputs]\n merged_results = [gen_control_flow_ops.merge(component) for component in zip(*flat_inputs)]\n flat_merged = [tensor for tensor, _ in merged_results]\n chosen_index = merged_results[0][1]\n merged_inputs = nest.pack_sequence_as(inputs[0], flat_merged, expand_composites=True)\n return (merged_inputs, chosen_index)", + "docstring": "Returns the value of an available element of . This op tests each of the tensors in in turn to determine if any of them is available. If it finds an available tensor, it returns it and its index in . It is an error if more than one tensor in is available. If no tensor in is available, the returned tensor and index are not set. This op handles both s and . If inputs has a mix of s and , all inputs are converted to IndexedSlices before merging. Args: inputs: The input tensors, at most one of which is available. name: A name for this operation (optional). Returns: A tuple containing the chosen input tensor and its index in . Raises: ValueError: If any of the inputs is None, or inputs are IndexedSlices and some but not all have a dense_shape property.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:merge arg:inputs arg:name arguments arg arg If Call Compare Raise Call With Call Assign Call If Call Call If Call Return return:yes Call Return return:yes Call If Call Call Assign Call For If Call Raise Call Call For Call Assign Call Assign Call Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_unlift", + "source_code": "def _unlift(gm: torch.fx.GraphModule, lifted_inputs: Sequence[Optional[str]], mutated_outputs: Sequence[Optional[str]], in_spec: pytree.TreeSpec, out_spec: Optional[pytree.TreeSpec], state_dict: dict[str, Any], constants: dict[str, Any], forward_arg_names: Optional[list[str]]=None):\n unlifted_name_to_node, input_name_to_node = _unlift_inputs_as_getattr(gm, lifted_inputs)\n _insert_copy_for_mutations(gm, mutated_outputs, unlifted_name_to_node, input_name_to_node)\n gm.graph._codegen = _get_codegen(in_spec, out_spec, forward_arg_names)\n gm.graph.lint()\n gm.recompile()\n return gm", + "docstring": "Args: lifted_inputs: A list matching the graph module's input nodes. 
For an input node that is referring to a lifted parameter/buffer, this list will contain the fqn the corresponding attribute. Otherwise, this list will contain None. This is used to unlift the lifted parameters as get_attr nodes. mutated_outputs: A list matching the graph module's output nodes. For an output node that is referring to a mutated buffer or user input, this list will contain the name of the corresponding buffer or user input that needs to be mutated. Otherwise, this list will contain None. This is used to re-insert an inplace copy_ operator to copy the mutated values back to the original node.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_unlift.py", + "ast_data": "FunctionDef name:_unlift arg:gm arg:lifted_inputs arg:mutated_outputs arg:in_spec arg:out_spec arg:state_dict arg:constants arg:forward_arg_names arguments arg arg arg arg arg arg arg arg Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_check_sated", + "source_code": "def _check_sated(self, raise_error):\n if self._sated:\n return\n creation_stack = ''.join([line.rstrip() for line in traceback.format_stack(self._stack_frame, limit=5)])\n if raise_error:\n try:\n raise RuntimeError('Object was never used (type {}): {}. If you want to mark it as used call its \"mark_used()\" method. It was originally created here:\\n{}'.format(self._type, self._repr, creation_stack))\n finally:\n self.sate()\n else:\n tf_logging.error('==================================\\nObject was never used (type {}):\\n{}\\nIf you want to mark it as used call its \"mark_used()\" method.\\nIt was originally created here:\\n{}\\n=================================='.format(self._type, self._repr, creation_stack))", + "docstring": "Check if the object has been sated.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_should_use.py", + "ast_data": "FunctionDef name:_check_sated arg:self arg:raise_error arguments arg arg If Return return:no Assign Call Call Call If Try Raise Call Call Call Call Call" + }, + { + "library": "cherrypy", + "name": "get_error_page", + "source_code": "def get_error_page(self, *args, **kwargs):\n return get_error_page(*args, **kwargs)", + "docstring": "Compose an HTML page with error information.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cperror.py", + "ast_data": "FunctionDef name:get_error_page arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "update_from_data_xy", + "source_code": "def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):\n if len(xy) == 0:\n return\n path = Path(xy)\n self.update_from_path(path, ignore=ignore, updatex=updatex, updatey=updatey)", + "docstring": "Update the bounds based on the passed in *xy* coordinates. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. Parameters ---------- xy : (N, 2) array-like The (x, y) coordinates. 
ignore : bool, optional - When `BboxBboxignore`, update the x/y values.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:update_from_data_xy arg:self arg:xy arg:ignore arg:updatex arg:updatey arguments arg arg arg arg arg If Compare Call Return return:no Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_get_broadcast_num_row_partitions", + "source_code": "def _get_broadcast_num_row_partitions(a: DynamicRaggedShape, b: DynamicRaggedShape):\n if a.num_row_partitions == 0 and b.num_row_partitions == 0:\n return 0\n expanded_num_row_partitions_a = a.num_row_partitions + max(0, b.rank - a.rank)\n expanded_num_row_partitions_b = b.num_row_partitions + max(0, a.rank - b.rank)\n if a.num_row_partitions == 0:\n return expanded_num_row_partitions_b\n if b.num_row_partitions == 0:\n return expanded_num_row_partitions_a\n return max(expanded_num_row_partitions_a, expanded_num_row_partitions_b)", + "docstring": "Returns broadcast_dynamic_shape(a, b).num_row_partitions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:_get_broadcast_num_row_partitions arg:a arg:b arguments arg arg If BoolOp Compare Compare Return return:yes Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ConstantPad3d", + "source_code": "class ConstantPad3d(_ConstantPadNd):\n padding: tuple[int, int, int, int, int, int]\n\n def __init__(self, padding: _size_6_t, value: float) -> None:\n super().__init__(value)\n self.padding = _ntuple(6)(padding)", + "docstring": "Pads the input tensor boundaries with a constant value. For -dimensional padding, use :func:. Args: padding (int, tuple): the size of the padding. If is , uses the same padding in all boundaries. If a 6-, uses (:math:, :math:, :math:, :math:, :math:, :math:) Shape: - Input: :math: or :math:. 
- Output: :math: or :math:, where :math: :math: :math: Examples:: >>> m = nn.ConstantPad3d(3, 3.5) >>> input = torch.randn(16, 3, 10, 20, 30) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\padding.py", + "ast_data": "ClassDef name:ConstantPad3d FunctionDef name:__init__ arg:self arg:padding arg:value arguments arg arg arg Call Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_convert_enter", + "source_code": "def _convert_enter(self, parent_pfor: 'PFor', enter):\n inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0])\n control_inputs = []\n for x in enter.op.control_inputs:\n converted = parent_pfor._convert_helper(x)\n if not isinstance(converted, ops.Operation):\n converted = converted.t\n control_inputs.append(converted)\n if control_inputs:\n with ops.control_dependencies(control_inputs):\n inp = array_ops.identity(inp)\n return (inp, stacked)", + "docstring": "Converts an Enter node.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:_convert_enter arg:self arg:parent_pfor arg:enter arguments arg arg arg Assign Call Assign For Assign Call If Call Assign Call If With Call Assign Call Return return:yes" + }, + { + "library": "pygame", + "name": "get_bottom_layer", + "source_code": "def get_bottom_layer(self):\n return self._spritelayers[self._spritelist[0]]", + "docstring": "return the bottom layer LayeredUpdates.get_bottom_layer(): return layer", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:get_bottom_layer arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_supylabel", + "source_code": "def get_supylabel(self):\n text_obj = self._supylabel\n return '' if text_obj is None else text_obj.get_text()", + "docstring": "Return the supylabel as string or an empty string if not set.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:get_supylabel arg:self arguments arg Assign Return return:yes Compare Call" + }, + { + "library": "numpy", + "name": "hermgrid3d", + "source_code": "def hermgrid3d(x, y, z, c):\n return pu._gridnd(hermval, c, x, y, z)", + "docstring": "Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) where the points `axbyczxyzxyzxyzccxyzxyzcxy`. 
See Also -------- hermval, hermval2d, hermgrid2d, hermval3d Examples -------- >>> from numpy.polynomial.hermite import hermgrid3d >>> x = [1, 2] >>> y = [4, 5] >>> z = [6, 7] >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] >>> hermgrid3d(x, y, z, c) array([[[ 40077., 54117.], [ 49293., 66561.]], [[ 72375., 97719.], [ 88975., 120131.]]])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite.py", + "ast_data": "FunctionDef name:hermgrid3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_display_window", + "source_code": "def _display_window(self, pf: PythonFileT, r: LintResult) -> Iterator[str]:\n if r.char is None or not self.report_column_numbers:\n yield f'{pf.path}:{r.line}: {r.name}'\n else:\n yield f'{pf.path}:{r.line}:{r.char + 1}: {r.name}'\n begin = max((r.line or 0) - ErrorLines.BEFORE, 1)\n end = min(begin + ErrorLines.WINDOW, 1 + len(pf.lines))\n for lineno in range(begin, end):\n source_line = pf.lines[lineno - 1].rstrip()\n yield f'{lineno:5} | {source_line}'\n if lineno == r.line:\n spaces = 8 + (r.char or 0)\n carets = len(source_line) if r.char is None else r.length or 1\n yield (spaces * ' ' + carets * '^')", + "docstring": "Display a window onto the code with an error", + "type": "method", + "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py", + "ast_data": "FunctionDef name:_display_window arg:self arg:pf arg:r arguments arg arg arg If BoolOp Compare Assign Call BoolOp Assign Call Call For Call Assign Call If Compare Assign BoolOp Assign Compare Call BoolOp" + }, + { + "library": "tensorflow", + "name": "LayerCall", + "source_code": "class LayerCall(object):\n\n def __init__(self, call_collection, call_fn, name, input_signature):\n self.call_collection = call_collection\n self.input_signature = input_signature\n self.wrapped_call = def_function.function(layer_call_wrapper(call_collection, call_fn, name), input_signature=input_signature)\n self.original_layer_call = call_collection.layer_call_method\n\n def _maybe_trace(self, args, kwargs):\n if tracing_enabled():\n self.call_collection.add_trace(*args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n self._maybe_trace(args, kwargs)\n return self.wrapped_call(*args, **kwargs)\n\n def get_concrete_function(self, *args, **kwargs):\n self._maybe_trace(args, kwargs)\n return self.wrapped_call.get_concrete_function(*args, **kwargs)", + "docstring": "Function that triggers traces of other functions in the same collection.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", + "ast_data": "ClassDef name:LayerCall FunctionDef name:__init__ arg:self arg:call_collection arg:call_fn arg:name arg:input_signature arguments arg arg arg arg arg Assign Assign Assign Call Call Assign FunctionDef name:_maybe_trace arg:self arg:args arg:kwargs arguments arg arg arg If Call Call FunctionDef name:__call__ arg:self arguments arg arg arg Call Return return:yes Call FunctionDef name:get_concrete_function arg:self arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "real_if_close", + "source_code": "@array_function_dispatch(_real_if_close_dispatcher)\ndef real_if_close(a, tol=100):\n a = asanyarray(a)\n type_ = a.dtype.type\n if not issubclass(type_, _nx.complexfloating):\n return a\n if tol > 1:\n f = getlimits.finfo(type_)\n tol = f.eps * tol\n if _nx.all(_nx.absolute(a.imag) < tol):\n a = a.real\n return a", + "docstring": "If input is 
complex with all imaginary parts close to zero, return real parts. \"Close to zero\" is defined as * (machine epsilon of the type for ). Parameters ---------- a : array_like Input array. tol : float Tolerance in machine epsilons for the complex part of the elements in the array. If the tolerance is >> import numpy as np >>> np.finfo(float).eps 2.2204460492503131e-16 # may vary >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000) array([2.1, 5.2]) >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000) array([2.1+4.e-13j, 5.2 + 3e-15j])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_type_check_impl.py", + "ast_data": "FunctionDef name:real_if_close arg:a arg:tol arguments arg arg Assign Call Assign If Call Return return:yes If Compare Assign Call Assign If Call Compare Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "write_bytecode", + "source_code": "def write_bytecode(self, install_root):\n bytecode_file_names = [f'bytecode_{i}.c' for i in range(NUM_BYTECODE_FILES)]\n bytecode_files = [open(os.path.join(install_root, name), 'w') for name in bytecode_file_names]\n it = itertools.cycle(bytecode_files)\n for m in self.frozen_modules:\n self.write_frozen(m, next(it))\n for f in bytecode_files:\n f.close()", + "docstring": "Write the files containing the frozen bytecode. Shared frozen modules evenly across the files.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\_freeze.py", + "ast_data": "FunctionDef name:write_bytecode arg:self arg:install_root arguments arg arg Assign Call Assign Call Call Assign Call For Call Call For Call" + }, + { + "library": "sphinx", + "name": "LiteralBlockTransform", + "source_code": "class LiteralBlockTransform(SphinxPostTransform):\n default_priority = 400\n formats = ('latex',)\n\n def run(self, **kwargs: Any) -> None:\n matcher = NodeMatcher(nodes.container, literal_block=True)\n for node in matcher.findall(self.document):\n newnode = captioned_literal_block('', *node.children, **node.attributes)\n node.replace_self(newnode)", + "docstring": "Replace container nodes for literal_block by captioned_literal_block.", + "type": "class", + "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py", + "ast_data": "ClassDef name:LiteralBlockTransform Assign Assign FunctionDef name:run arg:self arguments arg arg Assign Call For Call Assign Call Call" + }, + { + "library": "matplotlib", + "name": "start_rasterizing", + "source_code": "def start_rasterizing(self):\n pass", + "docstring": "Switch to the raster renderer. 
Used by .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:start_rasterizing arg:self arguments arg" + }, + { + "library": "numpy", + "name": "fromregex", + "source_code": "@set_module('numpy')\ndef fromregex(file, regexp, dtype, encoding=None):\n own_fh = False\n if not hasattr(file, 'read'):\n file = os.fspath(file)\n file = np.lib._datasource.open(file, 'rt', encoding=encoding)\n own_fh = True\n try:\n if not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n if dtype.names is None:\n raise TypeError('dtype must be a structured datatype.')\n content = file.read()\n if isinstance(content, bytes) and isinstance(regexp, str):\n regexp = asbytes(regexp)\n if not hasattr(regexp, 'match'):\n regexp = re.compile(regexp)\n seq = regexp.findall(content)\n if seq and (not isinstance(seq[0], tuple)):\n newdtype = np.dtype(dtype[dtype.names[0]])\n output = np.array(seq, dtype=newdtype)\n output.dtype = dtype\n else:\n output = np.array(seq, dtype=dtype)\n return output\n finally:\n if own_fh:\n file.close()", + "docstring": "Construct an array from a text file, using regular expression parsing. The returned array is always a structured array, and is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields of the structured array. Parameters ---------- file : file, str, or pathlib.Path Filename or file object to read. .. versionchanged:: 1.22.0 Now accepts implementations. regexp : str or regexp Regular expression used to parse the file. Groups in the regular expression correspond to fields in the dtype. dtype : dtype or list of dtypes Dtype for the structured array; must be a structured datatype. encoding : str, optional Encoding used to decode the inputfile. Does not apply to input streams. Returns ------- output : ndarray The output array, containing the part of the content of that was matched by . is always a structured array. Raises ------ TypeError When is not a valid dtype for a structured array. See Also -------- fromstring, loadtxt Notes ----- Dtypes for structured arrays can be specified in several forms, but all forms specify at least the data type and field name. For details see . Examples -------- >>> import numpy as np >>> from io import StringIO >>> text = StringIO(\"1312 foo\\n1534 bar\\n444 qux\") >>> regexp = r\"(\\d+)\\s+(...)\" # match [digits, whitespace, anything] >>> output = np.fromregex(text, regexp, ... 
[('num', np.int64), ('key', 'S3')]) >>> output array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], dtype=[('num', '>> output['num'] array([1312, 1534, 444])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_npyio_impl.py", + "ast_data": "FunctionDef name:fromregex arg:file arg:regexp arg:dtype arg:encoding arguments arg arg arg arg Assign If Call Assign Call Assign Call Assign Try If Call Assign Call If Compare Raise Call Assign Call If BoolOp Call Call Assign Call If Call Assign Call Assign Call If BoolOp Call Assign Call Assign Call Assign Assign Call Return return:yes If Call Call" + }, + { + "library": "pytorch", + "name": "parent_child_names", + "source_code": "def parent_child_names(name):\n split_name = name.rsplit('.', 1)\n if len(split_name) == 1:\n return ('', split_name[0])\n else:\n return (split_name[0], split_name[1])", + "docstring": "Split full name of submodule into parent submodule's full name and submodule's name.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\_correct_bias.py", + "ast_data": "FunctionDef name:parent_child_names arg:name arguments arg Assign Call If Compare Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='partial_fit', callee='partial_fit').add(caller='fit', callee='fit'))\n return router", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.3 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multioutput.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_from_datetime64", + "source_code": "@classmethod\ndef _from_datetime64(cls, data, freq, tz=None) -> Self:\n if isinstance(freq, BaseOffset):\n freq = PeriodDtype(freq)._freqstr\n data, freq = dt64arr_to_periodarr(data, freq, tz)\n dtype = PeriodDtype(freq)\n return cls(data, dtype=dtype)", + "docstring": "Construct a PeriodArray from a datetime64 array Parameters ---------- data : ndarray[datetime64[ns], datetime64[ns, tz]] freq : str or Tick tz : tzinfo, optional Returns ------- PeriodArray[freq]", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\period.py", + "ast_data": "FunctionDef name:_from_datetime64 arg:cls arg:data arg:freq arg:tz arguments arg arg arg arg If Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "dstep", + "source_code": "def dstep(system, x0=None, t=None, n=None):\n if isinstance(system, dlti):\n system = system._as_ss()\n elif isinstance(system, lti):\n raise AttributeError('dstep can only be used with discrete-time dlti systems.')\n else:\n system = dlti(*system[:-1], dt=system[-1])._as_ss()\n if n is None:\n n = 100\n if t is None:\n t = np.linspace(0, n * system.dt, n, endpoint=False)\n else:\n t = np.asarray(t)\n yout = None\n for i in range(0, system.inputs):\n u = np.zeros((t.shape[0], system.inputs))\n u[:, i] = np.ones((t.shape[0],))\n one_output = dlsim(system, u, t=t, x0=x0)\n if yout is None:\n yout = (one_output[1],)\n else:\n yout = yout + (one_output[1],)\n tout = one_output[0]\n return (tout, yout)", + "docstring": "Step 
response of discrete-time system. Parameters ---------- system : dlti | tuple An instance of the LTI class or a tuple describing the system. The number of elements in the tuple determine the interpretation. I.e.: * `dltiTransferFunctionZerosPolesGainStateSpaceTransferFunctionZerosPolesGainStateSpacet` is not given). Returns ------- tout : ndarray Output time points, as a 1-D array. yout : tuple of ndarray Step response of system. Each element of the tuple represents the output of the system based on a step response to each input. See Also -------- step, dimpulse, dlsim, cont2discrete Examples -------- The following example illustrates how to create a digital Butterworth filer and plot its step response: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt ... >>> dt = 1 # sampling interval is one => time unit is sample number >>> bb, aa = signal.butter(3, 0.25, fs=1/dt) >>> t, y = signal.dstep((bb, aa, dt), n=25) ... >>> fig0, ax0 = plt.subplots() >>> ax0.step(t, np.squeeze(y), '.-', where='post') >>> ax0.set_title(r\"Step Response of a $3^\\text{rd}$ Order Butterworth Filter\") >>> ax0.set(xlabel='Sample number', ylabel='Amplitude', ylim=(0, 1.1*np.max(y))) >>> ax0.grid() >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:dstep arg:system arg:x0 arg:t arg:n arguments arg arg arg arg If Call Assign Call If Call Raise Call Assign Call Call If Compare Assign If Compare Assign Call Assign Call Assign For Call Assign Call Assign Call Assign Call If Compare Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "all_mismatch_leaf_graph_info", + "source_code": "def all_mismatch_leaf_graph_info(self) -> list[GraphInfo]:\n if not self.has_mismatch():\n return []\n no_mismatch_children = (self.upper_graph_info is None or not self.upper_graph_info.has_mismatch()) and (self.lower_graph_info is None or not self.lower_graph_info.has_mismatch())\n if no_mismatch_children:\n return [self]\n results = []\n if self.upper_graph_info is not None:\n results += self.upper_graph_info.all_mismatch_leaf_graph_info()\n if self.lower_graph_info is not None:\n results += self.lower_graph_info.all_mismatch_leaf_graph_info()\n return results", + "docstring": "Return a list of all leaf objects that have mismatch.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:all_mismatch_leaf_graph_info arg:self arguments arg If Call Return return:no Assign BoolOp BoolOp Compare Call BoolOp Compare Call If Return return:yes Assign If Compare Call If Compare Call Return return:yes" + }, + { + "library": "django", + "name": "validate", + "source_code": "def validate(self, value, model_instance):\n if not self.editable:\n return\n if self.choices is not None and value not in self.empty_values:\n for option_key, option_value in self.choices:\n if isinstance(option_value, (list, tuple)):\n for optgroup_key, optgroup_value in option_value:\n if value == optgroup_key:\n return\n elif value == option_key:\n return\n raise exceptions.ValidationError(self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value})\n if value is None and (not self.null):\n raise exceptions.ValidationError(self.error_messages['null'], code='null')\n if not self.blank and value in self.empty_values:\n raise exceptions.ValidationError(self.error_messages['blank'], code='blank')", + "docstring": "Validate value and raise ValidationError if necessary. 
Subclasses should override this to provide validation logic.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:validate arg:self arg:value arg:model_instance arguments arg arg arg If Return return:no If BoolOp Compare Compare For If Call For If Compare Return return:no If Compare Return return:no Raise Call If BoolOp Compare Raise Call If BoolOp Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "distribute_strategy", + "source_code": "@property\ndef distribute_strategy(self):\n return self._distribution_strategy or distribute_lib.get_strategy()", + "docstring": "The this model was created under.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:distribute_strategy arg:self arguments arg Return return:yes BoolOp Call" + }, + { + "library": "pytorch", + "name": "mtia", + "source_code": "def mtia(self, device: Optional[Union[int, device]]=None) -> Self:\n return self._apply(lambda t: t.mtia(device))", + "docstring": "Move all model parameters and buffers to the MTIA. This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on MTIA while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:mtia arg:self arg:device arguments arg arg Return return:yes Call arguments arg Call" + }, + { + "library": "tensorflow", + "name": "keras_tensor_from_tensor", + "source_code": "def keras_tensor_from_tensor(tensor):\n keras_tensor_cls = None\n for tensor_type, cls in keras_tensor_classes:\n if isinstance(tensor, tensor_type):\n keras_tensor_cls = cls\n break\n out = keras_tensor_cls.from_tensor(tensor)\n if hasattr(tensor, '_keras_mask'):\n out._keras_mask = keras_tensor_from_tensor(tensor._keras_mask)\n return out", + "docstring": "Convert a traced (composite)tensor to a representative KerasTensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", + "ast_data": "FunctionDef name:keras_tensor_from_tensor arg:tensor arguments arg Assign For If Call Assign Assign Call If Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "CustomObjectScope", + "source_code": "class CustomObjectScope(object):\n\n def __init__(self, *args):\n self.custom_objects = args\n self.backup = None\n\n def __enter__(self):\n self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()\n for objects in self.custom_objects:\n _GLOBAL_CUSTOM_OBJECTS.update(objects)\n return self\n\n def __exit__(self, *args, **kwargs):\n _GLOBAL_CUSTOM_OBJECTS.clear()\n _GLOBAL_CUSTOM_OBJECTS.update(self.backup)", + "docstring": "Exposes custom classes/functions to Keras deserialization internals. Under a scope , Keras methods such as or will be able to deserialize any custom object referenced by a saved config (e.g. a custom layer or metric). 
Example: Consider a custom regularizer : Args: *args: Dictionary or dictionaries of pairs.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py", + "ast_data": "ClassDef name:CustomObjectScope FunctionDef name:__init__ arg:self arguments arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call For Call Return return:yes FunctionDef name:__exit__ arg:self arguments arg arg arg Call Call" + }, + { + "library": "django", + "name": "__delattr__", + "source_code": "def __delattr__(self, name):\n super().__delattr__(name)\n self.__dict__.pop(name, None)", + "docstring": "Delete a setting and clear it from cache if needed.", + "type": "method", + "file_path": "django\\django\\conf\\__init__.py", + "ast_data": "FunctionDef name:__delattr__ arg:self arg:name arguments arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "recover_session", + "source_code": "def recover_session(self, master: str, saver: saver_lib.Saver=None, checkpoint_dir: str=None, checkpoint_filename_with_path: str=None, wait_for_checkpoint=False, max_wait_secs=7200, config=None) -> Tuple[session.Session, bool]:\n sess, is_loaded_from_checkpoint = self._restore_checkpoint(master, saver, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config)\n local_init_success, msg = self._try_run_local_init_op(sess)\n if not is_loaded_from_checkpoint:\n return (sess, False)\n restoring_file = checkpoint_dir or checkpoint_filename_with_path\n if not local_init_success:\n logging.info('Restoring model from %s did not make model ready for local init: %s', restoring_file, msg)\n return (sess, False)\n is_ready, msg = self._model_ready(sess)\n if not is_ready:\n logging.info('Restoring model from %s did not make model ready: %s', restoring_file, msg)\n return (sess, False)\n logging.info('Restored model from %s', restoring_file)\n return (sess, is_loaded_from_checkpoint)", + "docstring": "Creates a , recovering if possible. Creates a new session on 'master'. If the session is not initialized and can be recovered from a checkpoint, recover it. Args: master: representation of the TensorFlow master to use. saver: A object used to restore a model. checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the dir will be used to restore. checkpoint_filename_with_path: Full file name path to the checkpoint file. wait_for_checkpoint: Whether to wait for checkpoint to become available. max_wait_secs: Maximum time to wait for checkpoints to become available. config: Optional proto used to configure the session. Returns: A pair (sess, initialized) where 'initialized' is if the session could be recovered and initialized, otherwise. 
Raises: ValueError: If both checkpoint_dir and checkpoint_filename_with_path are set.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py", + "ast_data": "FunctionDef name:recover_session arg:self arg:master arg:saver arg:checkpoint_dir arg:checkpoint_filename_with_path arg:wait_for_checkpoint arg:max_wait_secs arg:config arguments arg arg arg arg arg arg arg arg Assign Call Assign Call If Return return:yes Assign BoolOp If Call Return return:yes Assign Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "run_and_parse_first_match", + "source_code": "def run_and_parse_first_match(run_lambda, command, regex):\n rc, out, _ = run_lambda(command)\n if rc != 0:\n return None\n match = re.search(regex, out)\n if match is None:\n return None\n return match.group(1)", + "docstring": "Run command using run_lambda, returns the first regex match if it exists.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\collect_env.py", + "ast_data": "FunctionDef name:run_and_parse_first_match arg:run_lambda arg:command arg:regex arguments arg arg arg Assign Call If Compare Return return:no Assign Call If Compare Return return:no Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, sv, sess, step_counter=None):\n super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)\n self._sv = sv\n self._sess = sess\n self._last_time = 0.0\n self._last_step = 0\n step_counter = sv.global_step if step_counter is None else step_counter\n self._step_counter = step_counter\n self._summary_tag = '%s/sec' % self._step_counter.op.name", + "docstring": "Create a . Args: sv: A . sess: A . step_counter: A holding the step counter. 
By defaults, it uses sv.global_step.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:sv arg:sess arg:step_counter arguments arg arg arg arg Call Call Assign Assign Assign Assign Assign Compare Assign Assign" + }, + { + "library": "scipy", + "name": "zeros", + "source_code": "@property\ndef zeros(self):\n return self.to_zpk().zeros", + "docstring": "Zeros of the system.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:zeros arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "search_combination", + "source_code": "def search_combination(transfer_rate_bytes_per_sec, node_to_latency_mapping) -> bool:\n partition_to_latency_mapping = get_partition_to_latency_mapping(self.partitions, node_to_latency_mapping)\n cost = get_latency_of_partitioned_graph(self.partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec)\n if len(self.partitions) == 1:\n return False\n partition_pair: list[int] = []\n for i in range(len(self.partitions) - 1):\n for j in range(i + 1, len(self.partitions)):\n new_cost = try_combining_partitions(i, j, self.partitions[:])\n if new_cost <= cost:\n partition_pair = [i, j]\n cost = new_cost\n reorganize_partitions(self.partitions)\n if len(partition_pair) != 0:\n p0 = self.partitions[partition_pair[0]]\n p1 = self.partitions[partition_pair[1]]\n combine_two_partitions(p0, p1, self.partitions)\n get_bfs_level_partition(self.partitions)\n reset_partition_device(self.partitions)\n get_device_to_partitions_mapping(self.partitions, self.devices)\n return len(partition_pair) != 0", + "docstring": "Given transfer rate between partitions and each node's latency, find two partitions to combine so the cost of the partitions can be reduced. The algorithm is : 1. Go through all the partition pairs and see if any pair of partitions can be combined. 2. Calculate the cost after the combination. 3. Select the minimum cost and combine its corresponding partition pair.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", + "ast_data": "FunctionDef name:search_combination arg:transfer_rate_bytes_per_sec arg:node_to_latency_mapping arguments arg arg Assign Call Assign Call If Compare Call Return return:yes For Call Call For Call Call Assign Call If Compare Assign Assign Call If Compare Call Assign Assign Call Call Call Call Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "_set_mutable", + "source_code": "def _set_mutable(self, mutable):\n object.__setattr__(self, '_mutable', mutable)", + "docstring": "Change the mutability value to on this options and children.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py", + "ast_data": "FunctionDef name:_set_mutable arg:self arg:mutable arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "initialize_system_for_tpu_embedding", + "source_code": "def initialize_system_for_tpu_embedding(embedding_config: embedding_pb2.TPUEmbeddingConfiguration, job: Optional[Text]=None) -> ops.Operation:\n config_string = embedding_config.SerializeToString()\n with ops.device(_tpu_system_device_name(job)):\n return tpu_ops.configure_tpu_embedding(config=config_string)", + "docstring": "Initializes a distributed TPU Embedding system for use with TensorFlow. The following two are equivalent: 1. initialize_system() with embedding_config. 2. 
initialize_system() without embedding_config, then initialize_system_for_tpu_embedding(). initialize_system() should not be called with embedding_config if initialize_system_for_tpu_embedding() is meant to be called later. Args: embedding_config: a proto describing the desired configuration of the hardware embedding lookup tables. job: The job (the XXX in TensorFlow device specification /job:XXX) that contains the TPU devices that will be initialized. If job=None it is assumed there is only one job in the TensorFlow flock, and an error will be returned if this assumption does not hold. Returns: A no-op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py", + "ast_data": "FunctionDef name:initialize_system_for_tpu_embedding arg:embedding_config arg:job arguments arg arg Assign Call With Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, input_gen):\n self.input_gen = input_gen", + "docstring": "Creates a representative dataset. Args: input_gen: A generator function that generates input samples for the model and has the same order, type and shape as the inputs to the model. Usually, this is a small subset of a few hundred samples randomly chosen, in no particular order, from the training or evaluation dataset.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:input_gen arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "to_list", + "source_code": "def to_list(self):\n if isinstance(self._values, RaggedTensorValue):\n values_as_list = self._values.to_list()\n else:\n values_as_list = self._values.tolist()\n return [values_as_list[self._row_splits[i]:self._row_splits[i + 1]] for i in range(len(self._row_splits) - 1)]", + "docstring": "Returns this ragged tensor value as a nested Python list.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py", + "ast_data": "FunctionDef name:to_list arg:self arguments arg If Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "get_relations", + "source_code": "def get_relations(self, cursor, table_name):\n table_name = table_name.upper()\n cursor.execute('\\n SELECT ca.column_name, cb.table_name, cb.column_name\\n FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb\\n WHERE user_constraints.table_name = %s AND\\n user_constraints.constraint_name = ca.constraint_name AND\\n user_constraints.r_constraint_name = cb.constraint_name AND\\n ca.position = cb.position', [table_name])\n return {self.identifier_converter(field_name): (self.identifier_converter(rel_field_name), self.identifier_converter(rel_table_name)) for field_name, rel_table_name, rel_field_name in cursor.fetchall()}", + "docstring": "Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all foreign keys in the given table.", + "type": "method", + "file_path": "django\\django\\db\\backends\\oracle\\introspection.py", + "ast_data": "FunctionDef name:get_relations arg:self arg:cursor arg:table_name arguments arg arg arg Assign Call Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "validate_string_arg", + "source_code": "def validate_string_arg(input_data, allowable_strings, layer_name, arg_name, allow_none=False, allow_callables=False):\n if allow_none and input_data is None:\n return\n elif 
allow_callables and callable(input_data):\n return\n elif isinstance(input_data, str) and input_data in allowable_strings:\n return\n else:\n allowed_args = '`None`, ' if allow_none else ''\n allowed_args += 'a `Callable`, ' if allow_callables else ''\n allowed_args += 'or one of the following values: %s' % (allowable_strings,)\n raise ValueError('The %s argument of layer %s received an invalid value %s. Allowed values are: %s.' % (arg_name, layer_name, input_data, allowed_args))", + "docstring": "Validates the correctness of a string-based arg.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\layer_utils.py", + "ast_data": "FunctionDef name:validate_string_arg arg:input_data arg:allowable_strings arg:layer_name arg:arg_name arg:allow_none arg:allow_callables arguments arg arg arg arg arg arg If BoolOp Compare Return return:no If BoolOp Call Return return:no If BoolOp Call Compare Return return:no Assign Raise Call" + }, + { + "library": "tensorflow", + "name": "_is_gputrace_device", + "source_code": "def _is_gputrace_device(self, device_name: str) -> bool:\n return '/stream:' in device_name or '/memcpy' in device_name", + "docstring": "Returns true if this device is part of the GPUTracer logging.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", + "ast_data": "FunctionDef name:_is_gputrace_device arg:self arg:device_name arguments arg arg Return return:yes BoolOp Compare Compare" + }, + { + "library": "pytorch", + "name": "GpuData", + "source_code": "@dataclasses.dataclass\nclass GpuData:\n uuid: str\n utilization: float\n mem_utilization: float", + "docstring": "Dataclass for storing gpu data. This is the data that will be logged to the usage_log file.", + "type": "class", + "file_path": "pytorch\\tools\\stats\\monitor.py", + "ast_data": "ClassDef name:GpuData" + }, + { + "library": "tensorflow", + "name": "_convert_values_and_partition", + "source_code": "@classmethod\ndef _convert_values_and_partition(cls, values, row_partition, name):\n if not isinstance(row_partition, RowPartition):\n raise TypeError(f'Argument `row_partition` must be a RowPartition. Received {row_partition}.')\n if isinstance(values, RaggedTensor):\n if values._row_partition.dtype != row_partition.dtype:\n if not ragged_config.auto_cast_partition_dtype():\n raise ValueError(f'Argument `row_partition` of RaggedTensor with name: {name} must have same dtype as Argument `values`. ({row_partition.dtype} vs. {values._row_partition.dtype}).')\n values = values.with_row_splits_dtype(row_partition.dtype)\n else:\n values = _convert_to_ragged_tensor_values(values)\n return (values, row_partition)", + "docstring": "Converts and to Tensors. If is a , then converts and to have compatible row-partitioning dtypes. In particular, if any of the row partitioning tensors are , then all of the other row partitioning tensors will be cast to (if auto_cast_partition_dtype() is true) or an error will be raised (if auto_cast_partition_dtype() is false). Args: values: The for the being constructed. row_partition: A RowPartition object for the being constructed. name: The name of the RowPartition object. 
Returns: A tuple (values, partition).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:_convert_values_and_partition arg:cls arg:values arg:row_partition arg:name arguments arg arg arg arg If Call Raise Call If Call If Compare If Call Raise Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "generate_dynamo_fx_repro_string", + "source_code": "def generate_dynamo_fx_repro_string(gm, args, compiler_name, check_accuracy=False, *, stable_output=False, save_dir=None, command='run'):\n model_str = NNModuleToString.convert(gm)\n writer = InputWriter(save_dir, stable_hash=True)\n for placeholder, arg in zip(fx_placeholder_targets(gm), args):\n if isinstance(arg, (int, torch.SymInt)):\n writer.symint(placeholder, arg)\n elif isinstance(arg, torch.Tensor):\n writer.tensor(placeholder, arg)\n else:\n raise TypeError(f'arg is neither SymInt/int nor torch.Tensor, {arg}')\n load_args = '\\n'.join(writer.lines())\n return textwrap.dedent(f\"\\n{generate_env_vars_string(stable_output=stable_output)}\\nfrom math import inf\\nimport torch\\nfrom torch import tensor, device\\nimport torch.fx as fx\\nimport torch._dynamo\\nfrom torch._dynamo.testing import rand_strided\\nfrom torch._dynamo.debug_utils import run_fwd_maybe_bwd\\n\\n{generate_config_string(stable_output=stable_output)}\\n\\n{extra_imports}\\n\\n{model_str}\\nmod = Repro()\\n\\n{load_args}\\n\\nif __name__ == '__main__':\\n from torch._dynamo.repro.after_dynamo import run_repro\\n run_repro(mod, load_args, accuracy={check_accuracy!r}, command={command!r},\\n save_dir={save_dir!r}, autocast={torch.is_autocast_enabled()!r}, backend={compiler_name!r})\\n\")", + "docstring": "Generate a repro string for backend-agnostic minified version.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\repro\\after_dynamo.py", + "ast_data": "FunctionDef name:generate_dynamo_fx_repro_string arg:gm arg:args arg:compiler_name arg:check_accuracy arguments arg arg arg arg arg arg arg Assign Call Assign Call For Call Call If Call Call If Call Call Raise Call Assign Call Call Return return:yes Call Call Call Call" + }, + { + "library": "numpy", + "name": "nancumsum", + "source_code": "@array_function_dispatch(_nancumsum_dispatcher)\ndef nancumsum(a, axis=None, dtype=None, out=None):\n a, mask = _replace_nan(a, 0)\n return np.cumsum(a, axis=axis, dtype=dtype, out=out)", + "docstring": "Return the cumulative sum of array elements over a given axis treating Not a Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are encountered and leading NaNs are replaced by zeros. Zeros are returned for slices that are all-NaN or empty. Parameters ---------- a : array_like Input array. axis : int, optional Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If is not specified, it defaults to the dtype of , unless has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. See :ref: for more details. Returns ------- nancumsum : ndarray. 
A new array holding the result is returned unless is specified, in which it is returned. The result has the same size as , and the same shape as if is not None or is a 1-d array. See Also -------- numpy.cumsum : Cumulative sum across array propagating NaNs. isnan : Show which elements are NaN. Examples -------- >>> import numpy as np >>> np.nancumsum(1) array([1]) >>> np.nancumsum([1]) array([1]) >>> np.nancumsum([1, np.nan]) array([1., 1.]) >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nancumsum(a) array([1., 3., 6., 6.]) >>> np.nancumsum(a, axis=0) array([[1., 2.], [4., 2.]]) >>> np.nancumsum(a, axis=1) array([[1., 3.], [3., 3.]])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py", + "ast_data": "FunctionDef name:nancumsum arg:a arg:axis arg:dtype arg:out arguments arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "generate_numba_apply_func", + "source_code": "@functools.cache\ndef generate_numba_apply_func(func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool):\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency('numba')\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def roll_apply(values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, *args: Any) -> np.ndarray:\n result = np.empty(len(begin))\n for i in numba.prange(len(result)):\n start = begin[i]\n stop = end[i]\n window = values[start:stop]\n count_nan = np.sum(np.isnan(window))\n if len(window) - count_nan >= minimum_periods:\n result[i] = numba_func(window, *args)\n else:\n result[i] = np.nan\n return result\n return roll_apply", + "docstring": "Generate a numba jitted apply function specified by values from engine_kwargs. 1. jit the user's function 2. Return a rolling apply function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the rolling apply function. Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function", + "type": "function", + "file_path": "pandas\\pandas\\core\\window\\numba_.py", + "ast_data": "FunctionDef name:generate_numba_apply_func arg:func arg:nopython arg:nogil arg:parallel arguments arg arg arg arg Assign Call If Assign Call FunctionDef name:roll_apply arg:values arg:begin arg:end arg:minimum_periods arguments arg arg arg arg arg Assign Call Call For Call Call Assign Assign Assign Assign Call Call If Compare Call Assign Call Assign Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "to_python", + "source_code": "def to_python(self, value):\n return value", + "docstring": "Convert the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Return the converted value. Subclasses should override this.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:to_python arg:self arg:value arguments arg arg Return return:yes" + }, + { + "library": "numpy", + "name": "UmfpackNotFoundError", + "source_code": "class UmfpackNotFoundError(NotFoundError):\n pass", + "docstring": "UMFPACK sparse solver ( not found. 
Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [umfpack]) or by setting the UMFPACK environment variable.", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "ClassDef name:UmfpackNotFoundError" + }, + { + "library": "numpy", + "name": "make_mask_none", + "source_code": "def make_mask_none(newshape, dtype=None):\n if dtype is None:\n result = np.zeros(newshape, dtype=MaskType)\n else:\n result = np.zeros(newshape, dtype=make_mask_descr(dtype))\n return result", + "docstring": "Return a boolean mask of the given shape, filled with False. This function returns a boolean ndarray with all entries False, that can be used in common mask manipulations. If a complex dtype is specified, the type of each field is converted to a boolean type. Parameters ---------- newshape : tuple A tuple indicating the shape of the mask. dtype : {None, dtype}, optional If None, use a MaskType instance. Otherwise, use a new datatype with the same fields as , converted to boolean types. Returns ------- result : ndarray An ndarray of appropriate shape and dtype, filled with False. See Also -------- make_mask : Create a boolean mask from an array. make_mask_descr : Construct a dtype description list from a given dtype. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> ma.make_mask_none((3,)) array([False, False, False]) Defining a more complex dtype. >>> dtype = np.dtype({'names':['foo', 'bar'], ... 'formats':[np.float32, np.int64]}) >>> dtype dtype([('foo', '>> ma.make_mask_none((3,), dtype=dtype) array([(False, False), (False, False), (False, False)], dtype=[('foo', '|b1'), ('bar', '|b1')])", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:make_mask_none arg:newshape arg:dtype arguments arg arg If Compare Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "assign", + "source_code": "def assign(val, name=None):\n if name is None:\n name = parent_name + '_assign'\n return var._strided_slice_assign(begin=begin, end=end, strides=strides, value=val, name=name, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)", + "docstring": "Closure that holds all the arguments to create an assignment.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:assign arg:val arg:name arguments arg arg If Compare Assign Return return:yes Call" + }, + { + "library": "django", + "name": "hexewkb", + "source_code": "@property\ndef hexewkb(self):\n return ewkb_w(dim=3 if self.hasz else 2).write_hex(self)", + "docstring": "Return the EWKB of this Geometry in hexadecimal form. 
This is an extension of the WKB specification that includes SRID value that are a part of this geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:hexewkb arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "asdict", + "source_code": "def asdict(self) -> dict[str, Any]:\n return {'name': self.name, 'max_abs_diff': self.max_abs_diff, 'max_rel_diff': self.max_rel_diff, 'abs_diff_hist': [self.abs_diff_hist[0].tolist(), self.abs_diff_hist[1].tolist()], 'rel_diff_hist': [self.rel_diff_hist[0].tolist(), self.rel_diff_hist[1].tolist()], 'expected_dtype': str(self.expected_dtype), 'actual_dtype': str(self.actual_dtype)}", + "docstring": "Convert the VerificationInfo object to a dictionary. Returns: A dictionary representation of the VerificationInfo object.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_verification.py", + "ast_data": "FunctionDef name:asdict arg:self arguments arg Return return:yes Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_deserialize_keras_tensors", + "source_code": "def _deserialize_keras_tensors(kwargs, layer_map):\n\n def _deserialize_keras_tensor(t):\n if isinstance(t, tf_utils.ListWrapper):\n t = t.as_list()\n layer_name = t[0]\n node_index = t[1]\n tensor_index = t[2]\n layer = layer_map[layer_name]\n new_node_index = get_node_index(layer, node_index)\n if new_node_index is None:\n raise IndexError\n node = layer._inbound_nodes[new_node_index]\n return nest.flatten(node.outputs)[tensor_index]\n return t\n kwargs = tf_utils.convert_inner_node_data(kwargs, wrap=True)\n return nest.map_structure(_deserialize_keras_tensor, kwargs)", + "docstring": "Deserializes Keras Tensors passed to ..", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py", + "ast_data": "FunctionDef name:_deserialize_keras_tensors arg:kwargs arg:layer_map arguments arg arg FunctionDef name:_deserialize_keras_tensor arg:t arguments arg If Call Assign Call Assign Assign Assign Assign Assign Call If Compare Raise Assign Return return:yes Call Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_get_axis_line_edge_points", + "source_code": "def _get_axis_line_edge_points(self, minmax, maxmin, position=None):\n mb = [minmax, maxmin]\n mb_rev = mb[::-1]\n mm = [[mb, mb_rev, mb_rev], [mb_rev, mb_rev, mb], [mb, mb, mb]]\n mm = mm[self.axes._vertical_axis][self._axinfo['i']]\n juggled = self._axinfo['juggled']\n edge_point_0 = mm[0].copy()\n if position == 'lower' and mm[1][juggled[-1]] < mm[0][juggled[-1]] or (position == 'upper' and mm[1][juggled[-1]] > mm[0][juggled[-1]]):\n edge_point_0[juggled[-1]] = mm[1][juggled[-1]]\n else:\n edge_point_0[juggled[0]] = mm[1][juggled[0]]\n edge_point_1 = edge_point_0.copy()\n edge_point_1[juggled[1]] = mm[1][juggled[1]]\n return (edge_point_0, edge_point_1)", + "docstring": "Get the edge points for the black bolded axis line.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py", + "ast_data": "FunctionDef name:_get_axis_line_edge_points arg:self arg:minmax arg:maxmin arg:position arguments arg arg arg arg Assign Assign Assign Assign Assign Assign Call If BoolOp BoolOp Compare Compare BoolOp Compare Compare Assign Assign Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_set_table_descriptor", + "source_code": "def 
_set_table_descriptor(self, table_descriptor: tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.TableDescriptor, num_hosts: int, learning_rate_index: Dict[Callable[[], Any], int]):\n table_descriptor.name = self.name\n table_descriptor.vocabulary_size = max(self.vocabulary_size, num_hosts)\n table_descriptor.dimension = self.dim\n parameters = table_descriptor.optimization_parameters\n if self.optimizer:\n if callable(self.optimizer.learning_rate):\n parameters.learning_rate.dynamic.tag = learning_rate_index[self.optimizer.learning_rate]\n else:\n parameters.learning_rate.constant = self.optimizer.learning_rate\n if self.optimizer.low_dimensional_packing_status:\n parameters.low_dimensional_packing_status = optimization_parameters_pb2.LowDimensionalPackingStatus.Status.ENABLED\n self.optimizer._set_optimization_parameters(parameters)\n if self.quantization_config:\n self.quantization_config._set_optimization_parameters(parameters)", + "docstring": "Set the table descriptor from the table data.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py", + "ast_data": "FunctionDef name:_set_table_descriptor arg:self arg:table_descriptor arg:num_hosts arg:learning_rate_index arguments arg arg arg arg Assign Assign Call Assign Assign If If Call Assign Assign If Assign Call If Call" + }, + { + "library": "tensorflow", + "name": "_convert_tflite_enum_type_to_tf_type", + "source_code": "def _convert_tflite_enum_type_to_tf_type(tflite_enum_type):\n tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type)\n if tf_type is None:\n raise ValueError('Unsupported enum {}. The valid map of enum to tf types is : {}'.format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES))\n return tf_type", + "docstring": "Converts tflite enum type (eg: 0) to tf type (eg: tf.float32). Args: tflite_enum_type: tflite enum type (eg: 0, that corresponds to float32) Raises: ValueError: If an invalid tflite enum type is provided. Returns: tf type (eg: tf.float32)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", + "ast_data": "FunctionDef name:_convert_tflite_enum_type_to_tf_type arg:tflite_enum_type arguments arg Assign Call If Compare Raise Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "functional_call", + "source_code": "@deprecated('`torch.nn.utils.stateless.functional_call` is deprecated as of PyTorch 2.0 and will be removed in a future version of PyTorch. Please use `torch.func.functional_call` instead which is a drop-in replacement.', category=FutureWarning)\ndef functional_call(module: 'torch.nn.Module', parameters_and_buffers: dict[str, Tensor], args: Optional[Union[Any, tuple]]=None, kwargs: Optional[dict[str, Any]]=None, *, tie_weights: bool=True, strict: bool=False):\n return _functional_call(module, parameters_and_buffers, args, kwargs, tie_weights=tie_weights, strict=strict)", + "docstring": "Perform a functional call on the module by replacing the module parameters and buffers with the provided ones. .. warning:: This API is deprecated as of PyTorch 2.0 and will be removed in a future version of PyTorch. Please use :func: instead, which is a drop-in replacement for this API. .. note:: If the module has active parametrizations, passing a value in the :attr: argument with the name set to the regular parameter name will completely disable the parametrization. 
If you want to apply the parametrization function to the value passed please set the key as `parameters_and_buffers`.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\stateless.py", + "ast_data": "FunctionDef name:functional_call arg:module arg:parameters_and_buffers arg:args arg:kwargs arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "set_figure", + "source_code": "def set_figure(self, fig):\n if self._parent_figure is fig:\n return\n if self._parent_figure is not None:\n raise RuntimeError('Can not put single artist in more than one figure')\n self._parent_figure = fig\n if self._parent_figure and self._parent_figure is not self:\n self.pchanged()\n self.stale = True", + "docstring": "Set the or instance the artist belongs to. Parameters ---------- fig : or", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:set_figure arg:self arg:fig arguments arg arg If Compare Return return:no If Compare Raise Call Assign If BoolOp Compare Call Assign" + }, + { + "library": "sphinx", + "name": "PyClasslike", + "source_code": "class PyClasslike(PyObject):\n option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()\n option_spec.update({'abstract': directives.flag, 'final': directives.flag})\n allow_nesting = True\n\n def get_signature_prefix(self, sig: str) -> Sequence[nodes.Node]:\n prefix: list[addnodes.desc_sig_element] = []\n if 'final' in self.options:\n prefix.extend((addnodes.desc_sig_keyword('', 'final'), addnodes.desc_sig_space()))\n if 'abstract' in self.options:\n prefix.extend((addnodes.desc_sig_keyword('', 'abstract'), addnodes.desc_sig_space()))\n prefix.extend((addnodes.desc_sig_keyword('', self.objtype), addnodes.desc_sig_space()))\n return prefix\n\n def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:\n if self.objtype == 'class':\n if not modname:\n return _('%s (built-in class)') % name_cls[0]\n return _('%s (class in %s)') % (name_cls[0], modname)\n elif self.objtype == 'exception':\n return name_cls[0]\n else:\n return ''", + "docstring": "Description of a class-like object (classes, interfaces, exceptions).", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py", + "ast_data": "ClassDef name:PyClasslike Call Call Assign FunctionDef name:get_signature_prefix arg:self arg:sig arguments arg arg If Compare Call Call Call If Compare Call Call Call Call Call Call Return return:yes FunctionDef name:get_index_text arg:self arg:modname arg:name_cls arguments arg arg arg If Compare If Return return:yes Call Return return:yes Call If Compare Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_transform", + "source_code": "def get_transform(self):\n return self._scale.get_transform()", + "docstring": "Return the transform used in the Axis' scale", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_module", + "source_code": "def _get_module(node: Node, named_modules: dict[str, torch.nn.Module]) -> Optional[torch.nn.Module]:\n if node.op == 'call_module' and str(node.target) in named_modules:\n return named_modules[str(node.target)]\n else:\n return None", + "docstring": "If refers to a call_module node, return the module, else None.", + "type": "function", + "file_path": 
"pytorch\\torch\\ao\\quantization\\fx\\utils.py", + "ast_data": "FunctionDef name:_get_module arg:node arg:named_modules arguments arg arg If BoolOp Compare Compare Call Return return:yes Call Return return:no" + }, + { + "library": "scipy", + "name": "_matrix_vector_product_of_stacks", + "source_code": "def _matrix_vector_product_of_stacks(A, b):\n return np.einsum('ijk,ik->ij', A, b)", + "docstring": "Compute the product of stack of matrices and vectors.", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py", + "ast_data": "FunctionDef name:_matrix_vector_product_of_stacks arg:A arg:b arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_set_scale", + "source_code": "def _set_scale(self):\n if self._orientation == 'x':\n pscale = self._parent.xaxis.get_scale()\n set_scale = self.set_xscale\n else:\n pscale = self._parent.yaxis.get_scale()\n set_scale = self.set_yscale\n if pscale == self._parentscale:\n return\n if self._ticks_set:\n ticks = self._axis.get_ticklocs()\n set_scale('functionlog' if pscale == 'log' else 'function', functions=self._functions[::-1])\n if self._ticks_set:\n self._axis.set_major_locator(mticker.FixedLocator(ticks))\n self._parentscale = pscale", + "docstring": "Check if parent has set its scale", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py", + "ast_data": "FunctionDef name:_set_scale arg:self arguments arg If Compare Assign Call Assign Assign Call Assign If Compare Return return:no If Assign Call Call Compare If Call Call Assign" + }, + { + "library": "pytorch", + "name": "_register_buffer_comm_hook", + "source_code": "def _register_buffer_comm_hook(self, state, hook: Callable, comm_hook_location=_BufferCommHookLocation.POST_FORWARD):\n assert callable(hook)\n self.buffer_hook = _BufferCommHook(buffer_comm_hook=hook, buffer_comm_hook_state=state, buffer_comm_hook_location=comm_hook_location)", + "docstring": "Allow custom registration of hooks that define how buffer are synchronized across ranks. The hook takes in an optional state and is passed in a Dict[str, Tensor] corresponding to buffer names and the buffers, and can run arbitrary reductions on buffers as opposed to DDP's default broadcast from rank 0. This is useful for example if a counter needs to be summed or averaged across ranks every iteration. Args: state (Any): Optional state that is passed to the hook. hook (Callable): Callable with the following signature: `` comm_hook_location (_BufferCommHookLocation): Enum value indicating where to run the hook. _BufferCommHookLocation.PRE_FORWARD means that the hook will run _before_ the forward pass, and _BufferCommHookLocation.POST_FORWARD means that the hook will run _after_ the forward pass. NOTE: To maximize performance, users can return a List[torch.futures.Future] from their hook, and DDP will install and await these hooks appropriately at the end of the backward pass. This will ensure all buffers are synchronized by the end of the backward pass. If this setting is used, it is recommended to pass comm_hook_location=_BufferCommHookLocation.POST_FORWARD, which will trigger the hook after the forward pass. 
If _BufferCommHookLocation.PRE_FORWARD is used, users must ensure appropriate synchronization when manipulating GPU buffers in the forward pass.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py", + "ast_data": "FunctionDef name:_register_buffer_comm_hook arg:self arg:state arg:hook arg:comm_hook_location arguments arg arg arg arg Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_get_attrs_items", + "source_code": "def _get_attrs_items(obj):\n attrs = getattr(obj.__class__, '__attrs_attrs__')\n attr_names = (a.name for a in attrs)\n return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]", + "docstring": "Returns a list of (name, value) pairs from an attrs instance. TODO(b/268078256): check if this comment is valid, and if so, ensure it's handled in the function below. The list will be sorted by name. Args: obj: an object. Returns: A list of (attr_name, attr_value) pairs, sorted by attr_name.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py", + "ast_data": "FunctionDef name:_get_attrs_items arg:obj arguments arg Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "write", + "source_code": "@abc.abstractmethod\ndef write(self, file_prefix: str) -> str:\n pass", + "docstring": "Serializes proto to disk. Args: file_prefix: string prefix of the filepath. Returns: The actual path the proto is written to.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py", + "ast_data": "FunctionDef name:write arg:self arg:file_prefix arguments arg arg" + }, + { + "library": "tensorflow", + "name": "_obj_reference_counts", + "source_code": "@property\ndef _obj_reference_counts(self):\n self._maybe_create_attribute('_obj_reference_counts_dict', object_identity.ObjectIdentityDictionary())\n return self._obj_reference_counts_dict", + "docstring": "A dictionary counting the number of attributes referencing an object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:_obj_reference_counts arg:self arguments arg Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, name: str, make_fx_graph: Callable[..., Any]):\n self.name = f'{name}_{next(SubgraphTemplate.index_counter)}'\n self.make_fx_graph = make_fx_graph", + "docstring": "Initialize a subgraph template. 
Args: name: The name of this template graph: The FX graph", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\subgraph.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:make_fx_graph arguments arg arg arg Assign Call Assign" + }, + { + "library": "virtualenv", + "name": "get_short_path_name", + "source_code": "def get_short_path_name(long_name):\n import ctypes\n from ctypes import wintypes\n GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW\n GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD]\n GetShortPathNameW.restype = wintypes.DWORD\n output_buf_size = 0\n while True:\n output_buf = ctypes.create_unicode_buffer(output_buf_size)\n needed = GetShortPathNameW(long_name, output_buf, output_buf_size)\n if output_buf_size >= needed:\n return output_buf.value\n output_buf_size = needed", + "docstring": "Gets the short path name of a given long path -", + "type": "function", + "file_path": "virtualenv\\src\\virtualenv\\util\\path\\_win.py", + "ast_data": "FunctionDef name:get_short_path_name arg:long_name arguments arg Assign Assign Assign Assign While Assign Call Assign Call If Compare Return return:yes Assign" + }, + { + "library": "tensorflow", + "name": "_GeneratorDataset", + "source_code": "class _GeneratorDataset(dataset_ops.DatasetSource):\n\n def __init__(self, init_args, init_func, next_func, finalize_func, output_signature, name=None):\n self._init_args = init_args\n self._init_structure = structure.type_spec_from_value(init_args)\n self._init_func = structured_function.StructuredFunctionWrapper(init_func, self._transformation_name(), input_structure=self._init_structure)\n self._next_func = structured_function.StructuredFunctionWrapper(next_func, self._transformation_name(), input_structure=self._init_func.output_structure)\n self._finalize_func = structured_function.StructuredFunctionWrapper(finalize_func, self._transformation_name(), input_structure=self._init_func.output_structure)\n self._output_signature = output_signature\n self._name = name\n variant_tensor = gen_dataset_ops.generator_dataset(structure.to_tensor_list(self._init_structure, self._init_args) + self._init_func.function.captured_inputs, self._next_func.function.captured_inputs, self._finalize_func.function.captured_inputs, init_func=self._init_func.function, next_func=self._next_func.function, finalize_func=self._finalize_func.function, **self._common_args)\n super().__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._output_signature\n\n def _transformation_name(self):\n return 'Dataset.from_generator()'", + "docstring": "A that generates elements by invoking a function.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_generator_op.py", + "ast_data": "ClassDef name:_GeneratorDataset FunctionDef name:__init__ arg:self arg:init_args arg:init_func arg:next_func arg:finalize_func arg:output_signature arg:name arguments arg arg arg arg arg arg arg Assign Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Assign Assign Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes FunctionDef name:_transformation_name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "benchmarks_main", + "source_code": "def benchmarks_main(true_main, argv=None):\n if argv is None:\n argv = sys.argv\n found_arg = [arg for arg in argv if arg.startswith('--benchmark_filter=') or 
arg.startswith('-benchmark_filter=')]\n if found_arg:\n argv.remove(found_arg[0])\n regex = found_arg[0].split('=')[1]\n app.run(lambda _: _run_benchmarks(regex), argv=argv)\n else:\n true_main()", + "docstring": "Run benchmarks as declared in argv. Args: true_main: True main function to run if benchmarks are not requested. argv: the command line arguments (if None, uses sys.argv).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py", + "ast_data": "FunctionDef name:benchmarks_main arg:true_main arg:argv arguments arg arg If Compare Assign Assign BoolOp Call Call If Call Assign Call Call arguments arg Call Call" + }, + { + "library": "scipy", + "name": "query_ball_tree", + "source_code": "def query_ball_tree(self, other, r, p=2.0, eps=0):\n return super().query_ball_tree(other, r, p, eps)", + "docstring": "Find all pairs of points between and whose distance is at most r. Parameters ---------- other : KDTree instance The tree containing points to search against. r : float The maximum distance, has to be positive. p : float, optional Which Minkowski norm to use. has to meet the condition ``1 >> import matplotlib.pyplot as plt >>> import numpy as np >>> from scipy.spatial import KDTree >>> rng = np.random.default_rng() >>> points1 = rng.random((15, 2)) >>> points2 = rng.random((15, 2)) >>> plt.figure(figsize=(6, 6)) >>> plt.plot(points1[:, 0], points1[:, 1], \"xk\", markersize=14) >>> plt.plot(points2[:, 0], points2[:, 1], \"og\", markersize=14) >>> kd_tree1 = KDTree(points1) >>> kd_tree2 = KDTree(points2) >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2) >>> for i in range(len(indexes)): ... for j in indexes[i]: ... plt.plot([points1[i, 0], points2[j, 0]], ... [points1[i, 1], points2[j, 1]], \"-r\") >>> plt.show()", + "type": "method", + "file_path": "scipy\\scipy\\spatial\\_kdtree.py", + "ast_data": "FunctionDef name:query_ball_tree arg:self arg:other arg:r arg:p arg:eps arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_root_anderson_doc", + "source_code": "def _root_anderson_doc():\n pass", + "docstring": "Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). M : float, optional Number of previous vectors to retain. Defaults to 5. w0 : float, optional Regularization parameter for numerical stability. 
Compared to unity, good values of the order of 0.01.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_root.py", + "ast_data": "FunctionDef name:_root_anderson_doc arguments" + }, + { + "library": "tensorflow", + "name": "_select_worker_slice", + "source_code": "def _select_worker_slice(worker_id, structured):\n\n def _get(x):\n return x._values[worker_id] if isinstance(x, PerWorkerValues) else x\n return nest.map_structure(_get, structured)", + "docstring": "Selects the worker slice of each of the items in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:_select_worker_slice arg:worker_id arg:structured arguments arg arg FunctionDef name:_get arg:x arguments arg Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "_get_obj_does_not_exist_redirect", + "source_code": "def _get_obj_does_not_exist_redirect(self, request, opts, object_id):\n msg = _('%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?') % {'name': opts.verbose_name, 'key': unquote(object_id)}\n self.message_user(request, msg, messages.WARNING)\n url = reverse('admin:index', current_app=self.admin_site.name)\n return HttpResponseRedirect(url)", + "docstring": "Create a message informing the user that the object doesn't exist and return a redirect to the admin index page.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:_get_obj_does_not_exist_redirect arg:self arg:request arg:opts arg:object_id arguments arg arg arg arg Assign Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_set_categories", + "source_code": "def _set_categories(self, categories, fastpath: bool=False) -> None:\n if fastpath:\n new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)\n else:\n new_dtype = CategoricalDtype(categories, ordered=self.ordered)\n if not fastpath and self.dtype.categories is not None and (len(new_dtype.categories) != len(self.dtype.categories)):\n raise ValueError('new categories need to have the same number of items as the old categories!')\n super().__init__(self._ndarray, new_dtype)", + "docstring": "Sets new categories inplace Parameters ---------- fastpath : bool, default False Don't perform validation of the categories for uniqueness or nulls Examples -------- >>> c = pd.Categorical([\"a\", \"b\"]) >>> c ['a', 'b'] Categories (2, object): ['a', 'b'] >>> c._set_categories(pd.Index([\"a\", \"c\"])) >>> c ['a', 'c'] Categories (2, object): ['a', 'c']", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:_set_categories arg:self arg:categories arg:fastpath arguments arg arg arg If Assign Call Assign Call If BoolOp Compare Compare Call Call Raise Call Call Call" + }, + { + "library": "pandas", + "name": "_register_accessor", + "source_code": "@doc(klass='', examples='', others='')\ndef _register_accessor(name: str, cls: type[NDFrame | Index]) -> Callable[[TypeT], TypeT]:\n\n def decorator(accessor: TypeT) -> TypeT:\n if hasattr(cls, name):\n warnings.warn(f'registration of accessor {accessor!r} under name {name!r} for type {cls!r} is overriding a preexisting attribute with the same name.', UserWarning, stacklevel=find_stack_level())\n setattr(cls, name, Accessor(name, accessor))\n cls._accessors.add(name)\n return accessor\n return decorator", + "docstring": "Register a custom 
accessor on {klass} objects. Parameters ---------- name : str Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. Returns ------- callable A class decorator. See Also -------- register_dataframe_accessor : Register a custom accessor on DataFrame objects. register_series_accessor : Register a custom accessor on Series objects. register_index_accessor : Register a custom accessor on Index objects. Notes ----- This function allows you to register a custom-defined accessor class for {klass}. The requirements for the accessor class are as follows: * Must contain an init method that: * accepts a single {klass} object * raises an AttributeError if the {klass} object does not have correctly matching inputs for the accessor * Must contain a method for each access pattern. * The methods should be able to take any argument signature. * Accessible using the @property decorator if no additional arguments are needed. Examples -------- {examples}", + "type": "function", + "file_path": "pandas\\pandas\\core\\accessor.py", + "ast_data": "FunctionDef name:_register_accessor arg:name arg:cls arguments arg arg FunctionDef name:decorator arg:accessor arguments arg If Call Call Call Call Call Call Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "flatten", + "source_code": "def flatten(self):\n yield self\n for child in self.children:\n if isinstance(child, tuple):\n child = child[1]\n if hasattr(child, 'flatten'):\n yield from child.flatten()\n else:\n yield child", + "docstring": "Recursively yield this Q object and all subexpressions, in depth-first order.", + "type": "method", + "file_path": "django\\django\\db\\models\\query_utils.py", + "ast_data": "FunctionDef name:flatten arg:self arguments arg For If Call Assign If Call Call" + }, + { + "library": "scikit-learn", + "name": "LassoBenchmark", + "source_code": "class LassoBenchmark(Predictor, Estimator, Benchmark):\n param_names = ['representation', 'precompute']\n params = (['dense', 'sparse'], [True, False])\n\n def setup_cache(self):\n super().setup_cache()\n\n def make_data(self, params):\n representation, precompute = params\n if representation == 'dense':\n data = _synth_regression_dataset(n_samples=1000000, n_features=100)\n else:\n data = _synth_regression_sparse_dataset(n_samples=50000, n_features=5000, density=0.01)\n return data\n\n def make_estimator(self, params):\n representation, precompute = params\n estimator = Lasso(precompute=precompute, alpha=0.001, random_state=0)\n return estimator\n\n def make_scorers(self):\n make_gen_reg_scorers(self)\n\n def skip(self, params):\n representation, precompute = params\n if representation == 'sparse' and precompute is False:\n return True\n return False", + "docstring": "Benchmarks for Lasso.", + "type": "class", + "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\linear_model.py", + "ast_data": "ClassDef name:LassoBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call FunctionDef name:skip arg:self arg:params arguments arg arg Assign If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "icdf", + "source_code": "def 
icdf(self, value: Tensor) -> Tensor:\n raise NotImplementedError", + "docstring": "Returns the inverse cumulative density/mass function evaluated at . Args: value (Tensor):", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\distribution.py", + "ast_data": "FunctionDef name:icdf arg:self arg:value arguments arg arg Raise" + }, + { + "library": "scikit-learn", + "name": "clip", + "source_code": "def clip(x: Array, /, min: float | Array | None=None, max: float | Array | None=None) -> Array:\n\n def _isscalar(a: float | Array | None, /) -> TypeIs[float | None]:\n return a is None or isinstance(a, (int, float))\n min_shape = () if _isscalar(min) else min.shape\n max_shape = () if _isscalar(max) else max.shape\n result_shape = np.broadcast_shapes(x.shape, min_shape, max_shape)\n if min is not None:\n min = da.broadcast_to(da.asarray(min), result_shape)\n if max is not None:\n max = da.broadcast_to(da.asarray(max), result_shape)\n if min is None and max is None:\n return da.positive(x)\n if min is None:\n return astype(da.minimum(x, max), x.dtype)\n if max is None:\n return astype(da.maximum(x, min), x.dtype)\n return astype(da.minimum(da.maximum(x, min), max), x.dtype)", + "docstring": "Array API compatibility wrapper for clip(). See the corresponding documentation in the array library and/or the array API specification for more details.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py", + "ast_data": "FunctionDef name:clip arg:min arg:max arguments arg arg arg FunctionDef name:_isscalar arguments arg Return return:yes BoolOp Compare Call Assign Call Assign Call Assign Call If Compare Assign Call Call If Compare Assign Call Call If BoolOp Compare Compare Return return:yes Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes Call Call Call" + }, + { + "library": "scrapy", + "name": "close_connections", + "source_code": "def close_connections(self) -> None:\n for conn in self._connections.values():\n assert conn.transport is not None\n conn.transport.abortConnection()", + "docstring": "Close all the HTTP/2 connections and remove them from pool Returns: Deferred that fires when all connections have been closed", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\http2\\agent.py", + "ast_data": "FunctionDef name:close_connections arg:self arguments arg For Call Compare Call" + }, + { + "library": "sphinx", + "name": "get_documenter", + "source_code": "def get_documenter(app: Sphinx, obj: Any, parent: Any) -> type[Documenter]:\n return _get_documenter(obj, parent, registry=app.registry)", + "docstring": "Get an autodoc.Documenter class suitable for documenting the given object. *obj* is the Python object to be documented, and *parent* is an another Python object (e.g. 
a module or a class) to which *obj* belongs to.", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py", + "ast_data": "FunctionDef name:get_documenter arg:app arg:obj arg:parent arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_StateStack", + "source_code": "class _StateStack(object):\n\n def __init__(self, type_):\n object.__setattr__(self, 'type', type_)\n object.__setattr__(self, '_stack', [])\n if not hasattr(type_, 'no_root'):\n self.enter()\n\n def __enter__(self):\n self.enter()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.exit()\n\n def enter(self):\n self._stack.append(self.type())\n\n def exit(self):\n self._stack.pop()\n\n @property\n def stack(self):\n return self._stack\n\n @property\n def level(self):\n return len(self._stack)\n\n @property\n def value(self):\n return self._stack[-1]\n\n def __iter__(self):\n return iter(self._stack)\n\n def __getattr__(self, key):\n return getattr(self._stack[-1], key)\n\n def __setattr__(self, key, value):\n setattr(self._stack[-1], key, value)", + "docstring": "Templated context manager. This class provides syntactic sugar for a stack of objects of known type. It allows accessing attributes of the object at the top of the stack directly against this object, which allows for very terse syntax. For example, this code: stack = _StateStack(Foo) stack.enter() stack.bar Is equivalent to: stack = [] stack.append(Foo()) foo = stack[-1] foo.bar See _State for more on how this is used. Attributes: type: Any, the type of objects that this stack holds level: int, the current stack depth stack: List[Any], the actual stack value: Any, the instance of the object at the top of the stack", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py", + "ast_data": "ClassDef name:_StateStack FunctionDef name:__init__ arg:self arg:type_ arguments arg arg Call Call If Call Call FunctionDef name:__enter__ arg:self arguments arg Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call FunctionDef name:enter arg:self arguments arg Call Call FunctionDef name:exit arg:self arguments arg Call FunctionDef name:stack arg:self arguments arg Return return:yes FunctionDef name:level arg:self arguments arg Return return:yes Call FunctionDef name:value arg:self arguments arg Return return:yes FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__getattr__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__setattr__ arg:self arg:key arg:value arguments arg arg arg Call" + }, + { + "library": "pandas", + "name": "comparison_op", + "source_code": "def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike:\n lvalues = ensure_wrapped_if_datetimelike(left)\n rvalues = ensure_wrapped_if_datetimelike(right)\n rvalues = lib.item_from_zerodim(rvalues)\n if isinstance(rvalues, list):\n rvalues = np.asarray(rvalues)\n if isinstance(rvalues, (np.ndarray, ABCExtensionArray)):\n if len(lvalues) != len(rvalues):\n raise ValueError('Lengths must match to compare', lvalues.shape, rvalues.shape)\n if should_extension_dispatch(lvalues, rvalues) or ((isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT) and lvalues.dtype != object):\n res_values = op(lvalues, rvalues)\n elif is_scalar(rvalues) and isna(rvalues):\n if op is operator.ne:\n res_values = np.ones(lvalues.shape, dtype=bool)\n 
else:\n res_values = np.zeros(lvalues.shape, dtype=bool)\n elif is_numeric_v_string_like(lvalues, rvalues):\n return invalid_comparison(lvalues, rvalues, op)\n elif lvalues.dtype == object or isinstance(rvalues, str):\n res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)\n else:\n res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True)\n return res_values", + "docstring": "Evaluate a comparison operation , , , , , or . Note: the caller is responsible for ensuring that numpy warnings are suppressed (with np.errstate(all=\"ignore\")) if needed. Parameters ---------- left : np.ndarray or ExtensionArray right : object Cannot be a DataFrame, Series, or Index. op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le} Returns ------- ndarray or ExtensionArray", + "type": "function", + "file_path": "pandas\\pandas\\core\\ops\\array_ops.py", + "ast_data": "FunctionDef name:comparison_op arg:left arg:right arg:op arguments arg arg arg Assign Call Assign Call Assign Call If Call Assign Call If Call If Compare Call Call Raise Call If BoolOp Call BoolOp BoolOp Call Compare Compare Assign Call If BoolOp Call Call If Compare Assign Call Assign Call If Call Return return:yes Call If BoolOp Compare Call Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "__call__", + "source_code": "def __call__(self, func, *args, **kwargs):\n old_name = self.old_name\n new_name = self.new_name\n message = self.message\n if old_name is None:\n old_name = func.__name__\n if new_name is None:\n depdoc = f'`{old_name}` is deprecated!'\n else:\n depdoc = f'`{old_name}` is deprecated, use `{new_name}` instead!'\n if message is not None:\n depdoc += '\\n' + message\n\n @functools.wraps(func)\n def newfunc(*args, **kwds):\n warnings.warn(depdoc, DeprecationWarning, stacklevel=2)\n return func(*args, **kwds)\n newfunc.__name__ = old_name\n doc = func.__doc__\n if doc is None:\n doc = depdoc\n else:\n lines = doc.expandtabs().split('\\n')\n indent = _get_indent(lines[1:])\n if lines[0].lstrip():\n doc = indent * ' ' + doc\n else:\n skip = len(lines[0]) + 1\n for line in lines[1:]:\n if len(line) > indent:\n break\n skip += len(line) + 1\n doc = doc[skip:]\n depdoc = textwrap.indent(depdoc, ' ' * indent)\n doc = f'{depdoc}\\n\\n{doc}'\n newfunc.__doc__ = doc\n return newfunc", + "docstring": "Decorator call. 
Refer to ``.", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_utils_impl.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:func arguments arg arg arg arg Assign Assign Assign If Compare Assign If Compare Assign Assign If Compare FunctionDef name:newfunc arguments arg arg Call Return return:yes Call Call Assign Assign If Compare Assign Assign Call Call Assign Call If Call Assign Assign Call For If Compare Call Call Assign Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "indices", + "source_code": "@property\ndef indices(self):\n return self._indices", + "docstring": "A 1-D containing the indices of the slices.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py", + "ast_data": "FunctionDef name:indices arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "parse_args", + "source_code": "def parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(description='Upload test stats to s3')\n parser.add_argument('--workflow-run-id', type=int, required=True, help='id of the workflow to get artifacts from')\n parser.add_argument('--workflow-run-attempt', type=int, required=True, help='which retry of the workflow this is')\n parser.add_argument('--workflow-name', type=str, required=True, help='id of the workflow to get artifacts from')\n parser.add_argument('--job-id', type=int, required=True, help='id of the workflow to get artifacts from')\n parser.add_argument('--job-name', type=str, required=True, help='id of the workflow to get artifacts from')\n parser.add_argument('--repo', type=str, required=False, help='which GitHub repo this workflow run belongs to')\n parser.add_argument('--debug', action='store_true', help='Enable debug mode')\n parser.add_argument('--dry-run', action='store_true', help='Enable dry-run mode')\n parser.add_argument('--artifact-prefix', type=str, required=False, help='artifact prefix to download raw utilizarion data from s3')\n parser.add_argument('--local-path', type=str, required=False, help='path of the raw utilizarion data from local location')\n return parser.parse_args()", + "docstring": "Parse command line arguments. Returns: argparse.Namespace: Parsed arguments.", + "type": "function", + "file_path": "pytorch\\tools\\stats\\upload_utilization_stats\\upload_utilization_stats.py", + "ast_data": "FunctionDef name:parse_args arguments Assign Call Call Call Call Call Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "astype", + "source_code": "def astype(self, dtype: AstypeArg, copy: bool=True) -> ArrayLike:\n dtype = pandas_dtype(dtype)\n if dtype == self.dtype:\n if not copy:\n return self\n else:\n return self.copy()\n if isinstance(dtype, ExtensionDtype):\n cls = dtype.construct_array_type()\n return cls._from_sequence(self, dtype=dtype, copy=copy)\n elif lib.is_np_dtype(dtype, 'M'):\n from pandas.core.arrays import DatetimeArray\n return DatetimeArray._from_sequence(self, dtype=dtype, copy=copy)\n elif lib.is_np_dtype(dtype, 'm'):\n from pandas.core.arrays import TimedeltaArray\n return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy)\n if not copy:\n return np.asarray(self, dtype=dtype)\n else:\n return np.array(self, dtype=dtype, copy=copy)", + "docstring": "Cast to a NumPy array or ExtensionArray with 'dtype'. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. 
copy : bool, default True Whether to copy the data, even if not necessary. If False, a copy is made only if the old dtype does not match the new dtype. Returns ------- np.ndarray or pandas.api.extensions.ExtensionArray An ``: >>> arr1 = arr.astype(\"Float64\") >>> arr1 [1.0, 2.0, 3.0] Length: 3, dtype: Float64 >>> arr1.dtype Float64Dtype() Otherwise, we will get a Numpy ndarray: >>> arr2 = arr.astype(\"float64\") >>> arr2 array([1., 2., 3.]) >>> arr2.dtype dtype('float64')", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:astype arg:self arg:dtype arg:copy arguments arg arg arg Assign Call If Compare If Return return:yes Return return:yes Call If Call Assign Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_default_initializer", + "source_code": "def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):\n del shape\n if dtype.is_floating:\n initializer = init_ops.glorot_uniform_initializer()\n initializing_from_value = False\n elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool or (dtype == dtypes.string):\n initializer = init_ops.zeros_initializer()\n initializing_from_value = False\n else:\n raise ValueError('An initializer for variable %s of %s is required' % (name, dtype.base_dtype))\n return (initializer, initializing_from_value)", + "docstring": "Provide a default initializer and a corresponding value. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. Returns: initializer and initializing_from_value. See get_variable above. Raises: ValueError: When giving unsupported dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", + "ast_data": "FunctionDef name:_get_default_initializer arg:self arg:name arg:shape arg:dtype arguments arg arg arg arg If Assign Call Assign If BoolOp Compare Assign Call Assign Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_axislabel_direction", + "source_code": "def set_axislabel_direction(self, label_direction):\n self._axislabel_add_angle = _api.check_getitem({'+': 0, '-': 180}, label_direction=label_direction)", + "docstring": "Adjust the direction of the axis label. Note that the *label_direction*\\s '+' and '-' are relative to the direction of the increasing coordinate. Parameters ---------- label_direction : {\"+\", \"-\"}", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py", + "ast_data": "FunctionDef name:set_axislabel_direction arg:self arg:label_direction arguments arg arg Assign Call" + }, + { + "library": "scipy", + "name": "Infinity", + "source_code": "class Infinity(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))\n self.global_optimum = [[1e-16 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return sum(x ** 6.0 * (sin(1.0 / x) + 2.0))", + "docstring": "Infinity objective function. This class defines the Infinity [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Infinity}}(x) = \\sum_{i=1}^{n} x_i^{6} \\left [ \\sin\\left ( \\frac{1}{x_i} \\right ) + 2 \\right ] Here, :math: represents the number of dimensions and :math: for :math:. 
*Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_I.py", + "ast_data": "ClassDef name:Infinity Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "mark_inset", + "source_code": "@_docstring.interpd\ndef mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):\n rect = _TransformedBboxWithCallback(inset_axes.viewLim, parent_axes.transData, callback=parent_axes._unstale_viewLim)\n kwargs.setdefault('fill', bool({'fc', 'facecolor', 'color'}.intersection(kwargs)))\n pp = BboxPatch(rect, **kwargs)\n parent_axes.add_patch(pp)\n p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)\n inset_axes.add_patch(p1)\n p1.set_clip_on(False)\n p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)\n inset_axes.add_patch(p2)\n p2.set_clip_on(False)\n return (pp, p1, p2)", + "docstring": "Draw a box to mark the location of an area represented by an inset axes. This function draws a box in *parent_axes* at the bounding box of *inset_axes*, and shows a connection with the inset axes by drawing lines at the corners, giving a \"zoomed in\" effect. Parameters ---------- parent_axes : Axes which contains the area of the inset axes. inset_axes : The inset axes. loc1, loc2 : {1, 2, 3, 4} Corners to use for connecting the inset axes and the area in the parent axes. **kwargs Patch properties for the lines and box drawn: %(Patch:kwdoc)s Returns ------- pp : The patch drawn to represent the area of the inset axes. p1, p2 : The patches connecting two corners of the inset axes and its area.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py", + "ast_data": "FunctionDef name:mark_inset arg:parent_axes arg:inset_axes arg:loc1 arg:loc2 arguments arg arg arg arg arg Assign Call Call Call Call Assign Call Call Assign Call Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "validate_introspection_endpoint_auth_signing_alg_values_supported", + "source_code": "def validate_introspection_endpoint_auth_signing_alg_values_supported(self):\n _validate_alg_values(self, 'introspection_endpoint_auth_signing_alg_values_supported', self.introspection_endpoint_auth_methods_supported)", + "docstring": "OPTIONAL. JSON array containing a list of the JWS signing algorithms (\"alg\" values) supported by the introspection endpoint for the signature on the JWT [JWT] used to authenticate the client at the introspection endpoint for the \"private_key_jwt\" and \"client_secret_jwt\" authentication methods. This metadata entry MUST be present if either of these authentication methods are specified in the \"introspection_endpoint_auth_methods_supported\" entry. No default algorithms are implied if this entry is omitted. 
The value \"none\" MUST NOT be used.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", + "ast_data": "FunctionDef name:validate_introspection_endpoint_auth_signing_alg_values_supported arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "_philox_scramble_seed", + "source_code": "def _philox_scramble_seed(seed):\n key = constant_op.constant([163851598941452064], dtypes.uint64)\n counter = math_ops.cast(seed, dtypes.uint64)\n mix = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2([4], key=key, counter=counter, dtype=dtypes.uint32, alg=Algorithm.PHILOX.value)\n key = array_ops.reshape(_uint32s_to_uint64(mix[:2]), [1])\n counter = array_ops_stack.stack([0, _uint32s_to_uint64(mix[2:])], axis=0)\n return (key, counter)", + "docstring": "Determines the key and counter for Philox PRNG with the given seed. Args: seed: An integer tensor of shape [2]. The seed to calculate the key and counter from. Returns: A pair (key, counter) suitable for V2 stateless RNG ops like .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops_util.py", + "ast_data": "FunctionDef name:_philox_scramble_seed arg:seed arguments arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, session_root, watch_fn=None, thread_name_filter=None):\n self._session_root = session_root\n self._watch_fn = watch_fn\n self._thread_name_filter = thread_name_filter\n self._session_wrapper = None", + "docstring": "Create a local debugger command-line interface (CLI) hook. Args: session_root: See doc of . watch_fn: See doc of . thread_name_filter: Regular-expression white list for threads on which the wrapper session will be active. See doc of for more details.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\hooks.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:session_root arg:watch_fn arg:thread_name_filter arguments arg arg arg arg Assign Assign Assign Assign" + }, + { + "library": "authlib", + "name": "create_token_response", + "source_code": "@hooked\ndef create_token_response(self):\n token = self.generate_token(scope=self.request.payload.scope, include_refresh_token=False)\n log.debug('Issue token %r to %r', token, self.client)\n self.save_token(token)\n return (200, token, self.TOKEN_RESPONSE_HEADER)", + "docstring": "If the access token request is valid and authorized, the authorization server issues an access token as described in Section 5.1. A refresh token SHOULD NOT be included. If the request failed client authentication or is invalid, the authorization server returns an error response as described in Section 5.2. An example successful response: .. 
code-block:: http HTTP/1.1 200 OK Content-Type: application/json Cache-Control: no-store Pragma: no-cache { \"access_token\":\"2YotnFZFEjr1zCsicMWpAA\", \"token_type\":\"example\", \"expires_in\":3600, \"example_parameter\":\"example_value\" } :returns: (status_code, body, headers)", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\client_credentials.py", + "ast_data": "FunctionDef name:create_token_response arg:self arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "has_strategy", + "source_code": "@tf_export('distribute.has_strategy')\ndef has_strategy():\n return get_strategy() is not _get_default_strategy()", + "docstring": "Return if there is a current non-default . Returns: True if inside a .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:has_strategy arguments Return return:yes Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "canonicalize_aten_ir_passes", + "source_code": "def canonicalize_aten_ir_passes(gm: torch.fx.GraphModule):\n canonicalize_quant_mapping(gm)", + "docstring": "Canonicalization passes that will run immediately after aot autograd tracing. Thsis must be run before all other graph passes.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\joint_graph.py", + "ast_data": "FunctionDef name:canonicalize_aten_ir_passes arg:gm arguments arg Call" + }, + { + "library": "django", + "name": "decr_version", + "source_code": "def decr_version(self, key, delta=1, version=None):\n return self.incr_version(key, -delta, version)", + "docstring": "Subtract delta from the cache version for the supplied key. Return the new version.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:decr_version arg:self arg:key arg:delta arg:version arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "_prepare", + "source_code": "def _prepare(self):\n self.message = str(self.message)\n self.extra_tags = str(self.extra_tags) if self.extra_tags is not None else None", + "docstring": "Prepare the message for serialization by forcing the `` to str in case they are lazy translations.", + "type": "method", + "file_path": "django\\django\\contrib\\messages\\storage\\base.py", + "ast_data": "FunctionDef name:_prepare arg:self arguments arg Assign Call Assign Compare Call" + }, + { + "library": "sphinx", + "name": "_load_modules", + "source_code": "def _load_modules(mod_name: str, ignored_module_exps: Iterable[re.Pattern[str]]) -> Set[str]:\n if any((exp.match(mod_name) for exp in ignored_module_exps)):\n return set()\n mod = import_module(mod_name)\n modules = {mod_name}\n if mod.__spec__ is None:\n return modules\n search_locations = mod.__spec__.submodule_search_locations\n for _, sub_mod_name, sub_mod_ispkg in pkgutil.iter_modules(search_locations):\n if sub_mod_name == '__main__':\n continue\n if sub_mod_ispkg:\n modules |= _load_modules(f'{mod_name}.{sub_mod_name}', ignored_module_exps)\n else:\n if any((exp.match(sub_mod_name) for exp in ignored_module_exps)):\n continue\n modules.add(f'{mod_name}.{sub_mod_name}')\n return modules", + "docstring": "Recursively load all submodules. :param mod_name: The name of a module to load submodules for. :param ignored_module_exps: A list of regexes for modules to ignore. 
:returns: A set of modules names including the provided module name, `` could not be loaded.", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\coverage.py", + "ast_data": "FunctionDef name:_load_modules arg:mod_name arg:ignored_module_exps arguments arg arg If Call Call Return return:yes Call Assign Call Assign If Compare Return return:yes Assign For Call If Compare If Call If Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "ptr", + "source_code": "@property\ndef ptr(self) -> int:\n return self._buffer.address", + "docstring": "Pointer to start of the buffer as an integer.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\buffer.py", + "ast_data": "FunctionDef name:ptr arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__rfloordiv__", + "source_code": "def __rfloordiv__(self, other):\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(other.value // self._value)", + "docstring": "Returns the quotient of and rounded down. Args: other: Another Dimension, or a value accepted by . Returns: A whose value is the integer quotient of and .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:__rfloordiv__ arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "is_platform_arm", + "source_code": "def is_platform_arm() -> bool:\n return platform.machine() in ('arm64', 'aarch64') or platform.machine().startswith('armv')", + "docstring": "Checking if the running platform use ARM architecture. Returns ------- bool True if the running platform uses ARM architecture.", + "type": "function", + "file_path": "pandas\\pandas\\compat\\__init__.py", + "ast_data": "FunctionDef name:is_platform_arm arguments Return return:yes BoolOp Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "allclose", + "source_code": "def allclose(a: TensorLikeType, b: TensorLikeType, rtol: float=1e-05, atol: float=1e-08, equal_nan: bool=False) -> bool:\n _check_close_args(name='torch.allclose', a=a, b=b, rtol=rtol, atol=atol)\n return bool(torch.all(torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)).item())", + "docstring": "Reference implementation of torch.allclose", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\__init__.py", + "ast_data": "FunctionDef name:allclose arg:a arg:b arg:rtol arg:atol arg:equal_nan arguments arg arg arg arg arg Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_dict_to_tensor", + "source_code": "def _dict_to_tensor(self, x, k1, k2, k3):\n return array_ops_stack.stack([array_ops_stack.stack([array_ops_stack.stack([x[i, j, k] for k in range(k3)]) for j in range(k2)]) for i in range(k1)])", + "docstring": "Convert a dictionary to a tensor. Args: x: A k1 * k2 dictionary. k1: First dimension of x. k2: Second dimension of x. k3: Third dimension of x. 
Returns: A k1 * k2 * k3 tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:_dict_to_tensor arg:self arg:x arg:k1 arg:k2 arg:k3 arguments arg arg arg arg arg Return return:yes Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "reshape_scale", + "source_code": "def reshape_scale(scale: torch.Tensor, axis: int, input: torch.Tensor) -> torch.Tensor:\n new_shape = [1] * input.ndim\n new_shape[axis] = input.size(axis)\n return scale.view(new_shape)", + "docstring": "Reshapes the scale so that we can multiply it to the input by the given axis.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:reshape_scale arg:scale arg:axis arg:input arguments arg arg arg Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_from_proto_fn", + "source_code": "def _from_proto_fn(v, import_scope=None):\n if v.is_resource:\n return resource_variable_ops.ResourceVariable.from_proto(v, import_scope=import_scope)\n return variable_v1.VariableV1.from_proto(v, import_scope=import_scope)", + "docstring": "Creates Variable or ResourceVariable from VariableDef as needed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:_from_proto_fn arg:v arg:import_scope arguments arg arg If Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_pivot_row", + "source_code": "def _pivot_row(T, basis, pivcol, phase, tol=1e-09, bland=False):\n if phase == 1:\n k = 2\n else:\n k = 1\n ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)\n if ma.count() == 0:\n return (False, np.nan)\n mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)\n q = mb / ma\n min_rows = np.ma.nonzero(q == q.min())[0]\n if bland:\n return (True, min_rows[np.argmin(np.take(basis, min_rows))])\n return (True, min_rows[0])", + "docstring": "Given a linear programming simplex tableau, determine the row for the pivot operation. Parameters ---------- T : 2-D array A 2-D array representing the simplex tableau, T, corresponding to the linear programming problem. It should have the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0]] for a Phase 2 problem, or the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0], [c'[0], c'[1], ..., c'[n_total], 0]] for a Phase 1 problem (a Problem in which a basic feasible solution is sought prior to maximizing the actual objective. ``. basis : array A list of the current basic variables. pivcol : int The index of the pivot column. phase : int The phase of the simplex algorithm (1 or 2). tol : float Elements in the pivot column smaller than tol will not be considered for pivoting. Nominally this value is zero, but numerical issues cause a tolerance about zero to be necessary. bland : bool If True, use Bland's rule for selection of the row (if more than one row can be used, choose the one with the lowest variable index). Returns ------- status: bool True if a suitable pivot row was found, otherwise False. A return of False indicates that the linear programming problem is unbounded. 
row: int The index of the row of the pivot element. If status is False, row will be returned as nan.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_linprog_simplex.py", + "ast_data": "FunctionDef name:_pivot_row arg:T arg:basis arg:pivcol arg:phase arg:tol arg:bland arguments arg arg arg arg arg arg If Compare Assign Assign Assign Call Compare If Compare Call Return return:yes Assign Call Compare Assign Assign Call Compare Call If Return return:yes Call Call Return return:yes" + }, + { + "library": "django", + "name": "geom_type", + "source_code": "@property\ndef geom_type(self):\n return OGRGeomType(capi.get_geom_type(self.ptr))", + "docstring": "Return the Type for this Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:geom_type arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_construct_node", + "source_code": "def _construct_node(signature: _schemas.OpSignature, named_inputs: Mapping[str, ir.Value | None], named_attrs: Mapping[str, ValidAttributeType], opset: onnxscript.values.Opset, num_outputs: int) -> ir.Node:\n inputs: list[ir.Value | None] = []\n for value in named_inputs.values():\n if isinstance(value, Sequence):\n inputs.extend(value)\n else:\n inputs.append(value)\n for input in reversed(inputs):\n if input is not None:\n break\n inputs.pop()\n attributes = [attr for attr in ir_convenience.convert_attributes(named_attrs) if attr.value is not None]\n outputs = [_tensors.SymbolicTensor(opset) for _ in range(num_outputs)]\n return ir.Node(signature.domain, signature.name, inputs=inputs, attributes=attributes, outputs=outputs, version=signature.opset_version)", + "docstring": "Construct the node with the inputs and attributes. Variadic inputs are flattened. Args: signature: The OpSignature for the node. named_inputs: The mapping of parameter names to their arguments. When we do not have the schema of an operator, we do not know the names of the inputs, in which case the names can be anything because they are not used in this function. The data structure is passed in for consistency with the other functions. named_attrs: The mapping of attribute names to their values. 
num_outputs: The number of outputs for the node.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_building.py", + "ast_data": "FunctionDef name:_construct_node arg:signature arg:named_inputs arg:named_attrs arg:opset arg:num_outputs arguments arg arg arg arg arg For Call If Call Call Call For Call If Compare Call Assign Call Compare Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "graphviz_dump_transform", + "source_code": "def graphviz_dump_transform(transform, dest, *, highlight=None):\n if highlight is None:\n highlight = [transform]\n seen = set()\n\n def recurse(root, buf):\n if id(root) in seen:\n return\n seen.add(id(root))\n props = {}\n label = type(root).__name__\n if root._invalid:\n label = f'[{label}]'\n if root in highlight:\n props['style'] = 'bold'\n props['shape'] = 'box'\n props['label'] = '\"%s\"' % label\n props = ' '.join(map('{0[0]}={0[1]}'.format, props.items()))\n buf.write(f'{id(root)} [{props}];\\n')\n for key, val in vars(root).items():\n if isinstance(val, TransformNode) and id(root) in val._parents:\n buf.write(f'\"{id(root)}\" -> \"{id(val)}\" [label=\"{key}\", fontsize=10];\\n')\n recurse(val, buf)\n buf = StringIO()\n buf.write('digraph G {\\n')\n recurse(transform, buf)\n buf.write('}\\n')\n subprocess.run(['dot', '-T', Path(dest).suffix[1:], '-o', dest], input=buf.getvalue().encode('utf-8'), check=True)", + "docstring": "Generate a graphical representation of the transform tree for *transform* using the :program: program (which this function depends on). The output format (png, dot, etc.) is determined from the suffix of *dest*. Parameters ---------- transform : The represented transform. dest : str Output filename. The extension must be one of the formats supported by :program:, e.g. png, svg, dot, ... (see highlight : list of or None The transforms in the tree to be drawn in bold. If *None*, *transform* is highlighted.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_internal_utils.py", + "ast_data": "FunctionDef name:graphviz_dump_transform arg:transform arg:dest arguments arg arg arg If Compare Assign Assign Call FunctionDef name:recurse arg:root arg:buf arguments arg arg If Compare Call Return return:no Call Call Assign Assign Call If Assign If Compare Assign Assign Assign Assign Call Call Call Call Call For Call Call If BoolOp Call Compare Call Call Call Call Call Assign Call Call Call Call Call Call Call Call" + }, + { + "library": "authlib", + "name": "ClientMetadataClaims", + "source_code": "class ClientMetadataClaims(BaseClaims):\n REGISTERED_CLAIMS = ['require_signed_request_object']\n\n def validate(self):\n self._validate_essential_claims()\n self.validate_require_signed_request_object()\n\n def validate_require_signed_request_object(self):\n self.setdefault('require_signed_request_object', False)\n if not isinstance(self['require_signed_request_object'], bool):\n raise InvalidClaimError('require_signed_request_object')\n self._validate_claim_value('require_signed_request_object')", + "docstring": "Additional client metadata can be used with :ref: and :ref: endpoints. 
This can be used with:: server.register_endpoint( ClientRegistrationEndpoint( claims_classes=[ rfc7591.ClientMetadataClaims, rfc9101.ClientMetadataClaims, ] ) ) server.register_endpoint( ClientRegistrationEndpoint( claims_classes=[ rfc7591.ClientMetadataClaims, rfc9101.ClientMetadataClaims, ] ) )", + "type": "class", + "file_path": "authlib\\authlib\\oauth2\\rfc9101\\registration.py", + "ast_data": "ClassDef name:ClientMetadataClaims Assign FunctionDef name:validate arg:self arguments arg Call Call FunctionDef name:validate_require_signed_request_object arg:self arguments arg Call If Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "Dropout", + "source_code": "class Dropout(torch.nn.Dropout):\n\n def forward(self, input):\n return input\n\n def _get_name(self):\n return 'QuantizedDropout'\n\n @classmethod\n def from_float(cls, mod, use_precomputed_fake_quant=False):\n return cls(mod.p, mod.inplace)\n\n @classmethod\n def from_reference(cls, mod, scale, zero_point):\n return cls(mod.p, mod.inplace)", + "docstring": "This is the quantized equivalent of :class:. And this is a placeholder to enable models where fp32 tensors had dropout to work with quantized tensors in train and eval mode. Args: p: probability of an element to be zeroed inplace: can optionally do the operation in-place. Default: ``", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\dropout.py", + "ast_data": "ClassDef name:Dropout FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call FunctionDef name:from_reference arg:cls arg:mod arg:scale arg:zero_point arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "__getitem__", + "source_code": "def __getitem__(self, name):\n if not isinstance(name, str):\n raise KeyError('Only string keys are supported')\n return self.named_transformers[name]", + "docstring": "Return transformer with name.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:name arguments arg arg If Call Raise Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_lars_path_residues", + "source_code": "def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None, copy=True, method='lar', verbose=False, fit_intercept=True, max_iter=500, eps=np.finfo(float).eps, positive=False):\n X_train = _check_copy_and_writeable(X_train, copy)\n y_train = _check_copy_and_writeable(y_train, copy)\n X_test = _check_copy_and_writeable(X_test, copy)\n y_test = _check_copy_and_writeable(y_test, copy)\n if fit_intercept:\n X_mean = X_train.mean(axis=0)\n X_train -= X_mean\n X_test -= X_mean\n y_mean = y_train.mean(axis=0)\n y_train = as_float_array(y_train, copy=False)\n y_train -= y_mean\n y_test = as_float_array(y_test, copy=False)\n y_test -= y_mean\n alphas, active, coefs = lars_path(X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False, method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps, positive=positive)\n residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]\n return (alphas, active, coefs, residues.T)", + "docstring": "Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array-like of shape (n_samples, n_features) The data to fit the LARS on y_train : 
array-like of shape (n_samples,) The target variable to fit LARS on X_test : array-like of shape (n_samples, n_features) The data to compute the residues on y_test : array-like of shape (n_samples,) The target variable to compute the residues on Gram : None, 'auto' or array-like of shape (n_features, n_features), default=None Precomputed Gram matrix (X' * X), if ``, whichever is smaller. active : list Indices of active variables at the end of the path. coefs : array-like of shape (n_features, n_alphas) Coefficients along the path residues : array-like of shape (n_alphas, n_samples) Residues of the prediction on the test data", + "type": "function", + "file_path": "scikit-learn\\sklearn\\linear_model\\_least_angle.py", + "ast_data": "FunctionDef name:_lars_path_residues arg:X_train arg:y_train arg:X_test arg:y_test arg:Gram arg:copy arg:method arg:verbose arg:fit_intercept arg:max_iter arg:eps arg:positive arguments arg arg arg arg arg arg arg arg arg arg arg arg Call Assign Call Assign Call Assign Call Assign Call If Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_is_causal_behavior", + "source_code": "def _is_causal_behavior(rank: int, world_size: int, i: int, is_causal: bool) -> _CausalBehavior:\n if not is_causal:\n return _CausalBehavior.NOT_IS_CAUSAL\n if i == 0:\n return _CausalBehavior.IS_CAUSAL\n source_rank = (rank - i) % world_size\n if source_rank < rank or _cp_options.enable_load_balance:\n return _CausalBehavior.NOT_IS_CAUSAL\n else:\n return _CausalBehavior.SKIP", + "docstring": "Calculate is_causal behavior for each KV block. The attention can either be calculated in full, not at all or with the causal mask applied.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py", + "ast_data": "FunctionDef name:_is_causal_behavior arg:rank arg:world_size arg:i arg:is_causal arguments arg arg arg arg If Return return:yes If Compare Return return:yes Assign If BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "sphinx", + "name": "stem", + "source_code": "def stem(self, word: str) -> str:\n return word", + "docstring": "This method implements stemming algorithm of the Python version. Default implementation does nothing. You should implement this if the language has any stemming rules. This class is used to preprocess search words before registering them in the search index. The stemming of the Python version and the JS version (given in the js_stemmer_code attribute) must be compatible.", + "type": "method", + "file_path": "sphinx\\sphinx\\search\\__init__.py", + "ast_data": "FunctionDef name:stem arg:self arg:word arguments arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "extract_weights", + "source_code": "def extract_weights(mod: nn.Module) -> tuple[tuple[Tensor, ...], tuple[str, ...], dict[str, list[str]]]:\n return _extract_members(mod, mod.named_parameters, nn.Parameter)", + "docstring": "This function removes all the Parameters from the model and return them as a tuple as well as their original attribute names. The weights must be re-loaded with before the model can be used again. 
Note that this function modifies the model in place and after this call, mod.parameters() will be empty.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\make_functional.py", + "ast_data": "FunctionDef name:extract_weights arg:mod arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_build_tree", + "source_code": "def _build_tree(self) -> bytes:\n raise AbstractMethodError(self)", + "docstring": "Build tree from data. This method initializes the root and builds attributes and elements with optional namespaces.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\xml.py", + "ast_data": "FunctionDef name:_build_tree arg:self arguments arg Raise Call" + }, + { + "library": "django", + "name": "default_trim_value", + "source_code": "def default_trim_value():\n return geos_version_tuple() >= (3, 12)", + "docstring": "GEOS changed the default value in 3.12.0. Can be replaced by True when 3.12.0 becomes the minimum supported version.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\io.py", + "ast_data": "FunctionDef name:default_trim_value arguments Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "_format_data_list_with_options", + "source_code": "def _format_data_list_with_options(self, data_list):\n if self._options and self._options.experimental_replication_mode == InputReplicationMode.PER_REPLICA and (not self._options.experimental_fetch_to_device):\n return [data_list]\n else:\n return data_list", + "docstring": "Change the data in to a list type if required. The OwnedMultiDeviceIterator returns the list data type, while the PER_REPLICA iterator (when used with prefetch disabled) returns without the enclosed list. This is to fix the inconsistency. Args: data_list: data_list Returns: list", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "FunctionDef name:_format_data_list_with_options arg:self arg:data_list arguments arg arg If BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "convolve1d", + "source_code": "@_ni_docstrings.docfiller\ndef convolve1d(input, weights, axis=-1, output=None, mode='reflect', cval=0.0, origin=0):\n weights = np.asarray(weights)\n weights = weights[::-1]\n origin = -origin\n if not weights.shape[0] & 1:\n origin -= 1\n if weights.dtype.kind == 'c':\n weights = weights.conj()\n return correlate1d(input, weights, axis, output, mode, cval, origin)", + "docstring": "Calculate a 1-D convolution along the given axis. The lines of the array along the given axis are convolved with the given weights. Parameters ---------- %(input)s weights : ndarray 1-D sequence of numbers. 
%(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- convolve1d : ndarray Convolved array with same shape as input Examples -------- >>> from scipy.ndimage import convolve1d >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([14, 24, 4, 13, 12, 36, 27, 0])", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_filters.py", + "ast_data": "FunctionDef name:convolve1d arg:input arg:weights arg:axis arg:output arg:mode arg:cval arg:origin arguments arg arg arg arg arg arg arg Assign Call Assign Assign If If Compare Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_hash_pandas_object", + "source_code": "def _hash_pandas_object(self, *, encoding: str, hash_key: str, categorize: bool) -> npt.NDArray[np.uint64]:\n from pandas.core.util.hashing import hash_array\n values = np.asarray(self.categories._values)\n hashed = hash_array(values, encoding, hash_key, categorize=False)\n mask = self.isna()\n if len(hashed):\n result = hashed.take(self._codes)\n else:\n result = np.zeros(len(mask), dtype='uint64')\n if mask.any():\n result[mask] = lib.u8max\n return result", + "docstring": "Hash a Categorical by hashing its categories, and then mapping the codes to the hashes. Parameters ---------- encoding : str hash_key : str categorize : bool Ignored for Categorical. Returns ------- np.ndarray[uint64]", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:_hash_pandas_object arg:self arguments arg arg arg arg Assign Call Assign Call Assign Call If Call Assign Call Assign Call Call If Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "on_test_batch_end", + "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_test_batch_end(self, batch, logs=None):\n pass", + "docstring": "Called at the end of a batch in methods. Also called at the end of a validation batch in the methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. 
Aggregated metric results up until this batch.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:on_test_batch_end arg:self arg:batch arg:logs arguments arg arg arg" + }, + { + "library": "pytorch", + "name": "create_default_global_save_plan", + "source_code": "def create_default_global_save_plan(all_plans: list[SavePlan], rewrite_index_hints: bool=True) -> tuple[list[SavePlan], Metadata]:\n md: dict[str, STORAGE_TYPES] = {}\n new_plans = []\n for plan in all_plans:\n new_items = []\n for item in plan.items:\n if not item.type == WriteItemType.SHARD:\n assert item.index.fqn not in md\n if item.type == WriteItemType.BYTE_IO:\n md[item.index.fqn] = BytesStorageMetadata()\n new_items.append(item)\n else:\n assert item.tensor_data is not None\n tensor_md = cast(TensorStorageMetadata, md.setdefault(item.index.fqn, TensorStorageMetadata(properties=item.tensor_data.properties, size=item.tensor_data.size, chunks=[])))\n new_item = item\n if rewrite_index_hints:\n new_index = dataclasses.replace(item.index, index=len(tensor_md.chunks))\n new_item = dataclasses.replace(item, index=new_index)\n new_items.append(new_item)\n assert item.tensor_data.chunk is not None, f'\\n Cannot create MD for tensor without bounds.\\n FQN: {item.index.fqn}\\n '\n tensor_md.chunks.append(item.tensor_data.chunk)\n new_plans.append(dataclasses.replace(plan, items=new_items))\n return (new_plans, Metadata(md))", + "docstring": "Create the global plan and metadata used by DefaultSavePlanner. Metadata is produced by concatenating the metadata of all `` is True.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py", + "ast_data": "FunctionDef name:create_default_global_save_plan arg:all_plans arg:rewrite_index_hints arguments arg arg Assign For Assign For If Compare Compare If Compare Assign Call Call Compare Assign Call Call Call Assign If Assign Call Call Assign Call Call Compare Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_canonical_import", + "source_code": "def get_canonical_import(import_set):\n import_list = sorted(import_set, key=lambda imp_and_priority: (-imp_and_priority[1], imp_and_priority[0]))\n return import_list[0][0]", + "docstring": "Obtain one single import from a set of possible sources of a symbol. One symbol might come from multiple places as it is being imported and reexported. To simplify API changes, we always use the same import for the same module, and give preference based on higher priority and alphabetical ordering. Args: import_set: (set) Imports providing the same symbol. This is a set of tuples in the form (import, priority). We want to pick an import with highest priority. 
Returns: A module name to import", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py", + "ast_data": "FunctionDef name:get_canonical_import arg:import_set arguments arg Assign Call arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ragged_tensor_categorical_crossentropy", + "source_code": "@dispatch.dispatch_for_types(categorical_crossentropy, ragged_tensor.RaggedTensor)\ndef _ragged_tensor_categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0, axis=-1):\n fn = functools.partial(categorical_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)\n return _ragged_tensor_apply_loss(fn, y_true, y_pred)", + "docstring": "Implements support for handling RaggedTensors. Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether is expected to be a logits tensor. By default, we assume that encodes a probability distribution. label_smoothing: Float in [0, 1]. If > then smooth the labels. For example, if , use for non-target labels and for target labels. axis: The axis along which to compute crossentropy (the features axis). Defaults to -1. Returns: Categorical crossentropy loss value. Expected shape: (batch, sequence_len, n_classes) with sequence_len being variable per batch. Return shape: (batch, sequence_len). When used by CategoricalCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the number of elements independent of the batch. E.g. if the RaggedTensor has 2 batches with [2, 1] values respectively the resulting loss is the sum of the individual loss values divided by 3.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:_ragged_tensor_categorical_crossentropy arg:y_true arg:y_pred arg:from_logits arg:label_smoothing arg:axis arguments arg arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_ConverterData", + "source_code": "class _ConverterData(object):\n\n def __init__(self, graph_def, variable_names_allowlist=None, variable_names_denylist=None):\n self._graph_def = graph_def\n self._tensor_data = {}\n self._build_node_defs_list()\n self._variable_names_allowlist = variable_names_allowlist\n self._variable_names_denylist = variable_names_denylist\n\n @property\n def graph_def(self):\n return self._graph_def\n\n @property\n def node_defs(self):\n return self._node_defs\n\n @property\n def tensor_data(self):\n return self._tensor_data\n\n def _should_convert(self, name):\n return (self._variable_names_allowlist is None or name in self._variable_names_allowlist) and (self._variable_names_denylist is None or name not in self._variable_names_denylist)\n\n def _build_node_defs_list(self):\n self._node_defs = {node.name: node for node in self._graph_def.node}\n if self._graph_def.library:\n for func in self._graph_def.library.function:\n self._node_defs.update({node.name: node for node in func.node_def if node.op in _CONTROL_FLOW_OPS})", + "docstring": "Container for constant conversion supporting data. The data includes the graph being converted, and the pre-converted tensors. 
This class will be specialized for ConcreteFunction and Session-based conversions, as the means to obtain that data is different for each case.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "ClassDef name:_ConverterData FunctionDef name:__init__ arg:self arg:graph_def arg:variable_names_allowlist arg:variable_names_denylist arguments arg arg arg arg Assign Assign Call Assign Assign FunctionDef name:graph_def arg:self arguments arg Return return:yes FunctionDef name:node_defs arg:self arguments arg Return return:yes FunctionDef name:tensor_data arg:self arguments arg Return return:yes FunctionDef name:_should_convert arg:self arg:name arguments arg arg Return return:yes BoolOp BoolOp Compare Compare BoolOp Compare Compare FunctionDef name:_build_node_defs_list arg:self arguments arg Assign If For Call Compare" + }, + { + "library": "scikit-learn", + "name": "_m_step", + "source_code": "def _m_step(self, X, log_resp):\n n_samples, _ = X.shape\n nk, xk, sk = _estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar, self.covariance_type)\n self._estimate_weights(nk)\n self._estimate_means(nk, xk)\n self._estimate_precisions(nk, xk, sk)", + "docstring": "M step. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array-like of shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py", + "ast_data": "FunctionDef name:_m_step arg:self arg:X arg:log_resp arguments arg arg arg Assign Assign Call Call Call Call Call" + }, + { + "library": "kornia", + "name": "is_intensity_only", + "source_code": "def is_intensity_only(self, strict: bool=True) -> bool:\n for arg in self.children():\n if isinstance(arg, (ImageSequential,)) and (not arg.is_intensity_only(strict)):\n return False\n elif isinstance(arg, (ImageSequential,)):\n pass\n elif isinstance(arg, K.IntensityAugmentationBase2D):\n pass\n elif strict:\n return False\n return True", + "docstring": "Check if all transformations are intensity-based. Args: strict: if strict is False, it will allow non-augmentation Modules to be passed. e.g. will be recognized as non-intensity module if strict is set to True. Note: patch processing would break the continuity of labels (e.g. bbounding boxes, masks).", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\image.py", + "ast_data": "FunctionDef name:is_intensity_only arg:self arg:strict arguments arg arg For Call If BoolOp Call Call Return return:yes If Call If Call If Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_config", + "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None):\n if 'initial_accumulator_value' not in config:\n config['initial_accumulator_value'] = 0.1\n if 'lr' in config:\n config['learning_rate'] = config.pop('lr')\n return cls(**config)", + "docstring": "Creates an optimizer from its config. This method is the reverse of , capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. 
Returns: An optimizer instance.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\adagrad.py", + "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arguments arg arg arg If Compare Assign If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "pfor", + "source_code": "def pfor(loop_fn, iters, fallback_to_while_loop=True, parallel_iterations=None, warn=False):\n\n def f():\n return _pfor_impl(loop_fn, iters, fallback_to_while_loop=fallback_to_while_loop, parallel_iterations=parallel_iterations, warn=warn)\n functions_run_eagerly = None\n if context.executing_eagerly() or _is_under_xla_context():\n functions_run_eagerly = def_function.functions_run_eagerly()\n if functions_run_eagerly:\n logging.warning('It looks like tf.function behavior was disabled, perhaps using tf.config.run_functions_eagerly. Vectorization primitives (e.g. tf.vectorized_map) require tf.function to work. These primitives will override the disable.')\n def_function.run_functions_eagerly(False)\n f = def_function.function(f)\n outputs = f()\n if functions_run_eagerly is not None:\n def_function.run_functions_eagerly(functions_run_eagerly)\n return outputs", + "docstring": "Equivalent to running times and stacking the outputs. has functionality similar to , i.e. running times, with input from 0 to , and stacking corresponding output of each iteration. However the implementation does not use a . Instead it adds new operations to the graph that collectively compute the same value as what running in a loop would compute. This is an experimental feature and currently has a lot of limitations: - There should be no data dependency between the different iterations. For example, a future iteration should not depend on a value or side-effect of a previous iteration. - Stateful kernels may mostly not be supported since these often imply a data dependency or ordering of the iterations. We do support a limited set of such stateful kernels though (like RandomFoo, Variable operations like reads, etc). - Conversion works only on a limited set of kernels for which a converter has been registered. - has limited support for control flow operations. in particular is not supported. - should return nested structure of Tensors or Operations. However if an Operation is returned, it should have zero outputs. - The shape and dtype of outputs should not depend on the input to loop_fn. Args: loop_fn: A function that takes an int32 scalar tf.Tensor object representing the iteration number, and optionally a keyword argument set to a PForConfig object. It returns a possibly nested structure of Tensor or Operation objects. Note that if setting argument to something other than None, may be called more than once during graph construction. So it may need to avoid mutating global state. iters: Number of iterations for which to run . fallback_to_while_loop: If true, on failing to vectorize an operation, pfor fallbacks to using a to dispatch the iterations. parallel_iterations: A knob to control how many iterations are vectorized and dispatched in parallel. The default value of None corresponds to vectorizing all the iterations. If is smaller than , then chunks of at most that many iterations are dispatched in sequence. This knob can be used to control the total memory usage. warn: Whether or not to warn when falling back to while loops. Returns: Returns a nested structure of stacked tensor objects with the same nested structure as the output of . 
Raises: ValueError: If parallel_iterations is not None and not an integer > 1.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py", + "ast_data": "FunctionDef name:pfor arg:loop_fn arg:iters arg:fallback_to_while_loop arg:parallel_iterations arg:warn arguments arg arg arg arg arg FunctionDef name:f arguments Return return:yes Call Assign If BoolOp Call Call Assign Call If Call Call Assign Call Assign Call If Compare Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "Ellipse", + "source_code": "@_register_style(_style_list)\nclass Ellipse:\n\n def __init__(self, pad=0.3):\n self.pad = pad\n\n def __call__(self, x0, y0, width, height, mutation_size):\n pad = mutation_size * self.pad\n width, height = (width + 2 * pad, height + 2 * pad)\n x0, y0 = (x0 - pad, y0 - pad)\n a = width / math.sqrt(2)\n b = height / math.sqrt(2)\n trans = Affine2D().scale(a, b).translate(x0 + width / 2, y0 + height / 2)\n return trans.transform_path(Path.unit_circle())", + "docstring": "An elliptical box. .. versionadded:: 3.7", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "ClassDef name:Ellipse FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign Assign Assign Assign Call Assign Call Assign Call Call Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_train_with_multi_worker", + "source_code": "def _train_with_multi_worker(method):\n\n def wrapper(model, **kwargs):\n\n def _worker_fn(_):\n callbacks = kwargs.pop('callbacks', None)\n filtered_callbacks = dist_utils.filter_distributed_callbacks(callbacks, model)\n kwargs['callbacks'] = filtered_callbacks\n return method(model, **kwargs)\n return dc.run_distribute_coordinator(_worker_fn, model._distribution_strategy)\n return wrapper", + "docstring": "Decorator that handles multi worker training with distribution strategy.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py", + "ast_data": "FunctionDef name:_train_with_multi_worker arg:method arguments arg FunctionDef name:wrapper arg:model arguments arg arg FunctionDef name:_worker_fn arg:_ arguments arg Assign Call Assign Call Assign Return return:yes Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_Barrier", + "source_code": "class _Barrier(object):\n\n def __init__(self, num_participants):\n self._num_participants = num_participants\n self._counter = 0\n self._flag = False\n self._local_sense = threading.local()\n self._lock = threading.Lock()\n self._condition = threading.Condition()\n\n def wait(self):\n self._local_sense.value = not self._flag\n with self._lock:\n self._counter += 1\n if self._counter == self._num_participants:\n self._counter = 0\n self._flag = self._local_sense.value\n with self._condition:\n while self._flag != self._local_sense.value:\n self._condition.wait()\n self._condition.notify_all()", + "docstring": "A reusable barrier class for worker synchronization.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py", + "ast_data": "ClassDef name:_Barrier FunctionDef name:__init__ arg:self arg:num_participants arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Call FunctionDef name:wait arg:self arguments arg Assign 
With If Compare Assign Assign With While Compare Call Call" + }, + { + "library": "scipy", + "name": "_fragment_3_1", + "source_code": "def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):\n if ell < 1:\n raise ValueError('expected ell to be a positive integer')\n best_m = None\n best_s = None\n if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):\n for m, theta in _theta.items():\n s = int(np.ceil(norm_info.onenorm() / theta))\n if best_m is None or m * s < best_m * best_s:\n best_m = m\n best_s = s\n else:\n for p in range(2, _compute_p_max(m_max) + 1):\n for m in range(p * (p - 1) - 1, m_max + 1):\n if m in _theta:\n s = _compute_cost_div_m(m, p, norm_info)\n if best_m is None or m * s < best_m * best_s:\n best_m = m\n best_s = s\n best_s = max(best_s, 1)\n return (best_m, best_s)", + "docstring": "A helper function for the _expm_multiply_* functions. Parameters ---------- norm_info : LazyOperatorNormInfo Information about norms of certain linear operators of interest. n0 : int Number of columns in the _expm_multiply_* B matrix. tol : float Expected to be :math: for single precision or :math: for double precision. m_max : int A value related to a bound. ell : int The number of columns used in the 1-norm approximation. This is usually taken to be small, maybe between 1 and 5. Returns ------- best_m : int Related to bounds for error control. best_s : int Amount of scaling. Notes ----- This is code fragment (3.1) in Al-Mohy and Higham (2011). The discussion of default values for m_max and ell is given between the definitions of equation (3.11) and the definition of equation (3.12).", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py", + "ast_data": "FunctionDef name:_fragment_3_1 arg:norm_info arg:n0 arg:tol arg:m_max arg:ell arguments arg arg arg arg arg If Compare Raise Call Assign Assign If Call Call For Call Assign Call Call Call If BoolOp Compare Compare Assign Assign For Call Call For Call If Compare Assign Call If BoolOp Compare Compare Assign Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__rsub__", + "source_code": "def __rsub__(self, other):\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(other.value - self._value)", + "docstring": "Returns the subtraction of from . Args: other: Another Dimension, or a value accepted by . Returns: A Dimension whose value is the subtraction of from .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:__rsub__ arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "_no_match", + "source_code": "def _no_match(dm: Tensor) -> Tuple[Tensor, Tensor]:\n dists = torch.empty(0, 1, device=dm.device, dtype=dm.dtype)\n idxs = torch.empty(0, 2, device=dm.device, dtype=torch.long)\n return (dists, idxs)", + "docstring": "Output empty tensors. Returns: - Descriptor distance of matching descriptors, shape of :math:. 
- Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math:.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\adalam\\core.py", + "ast_data": "FunctionDef name:_no_match arg:dm arguments arg Assign Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "is_categorical_dtype", + "source_code": "def is_categorical_dtype(arr_or_dtype) -> bool:\n warnings.warn('is_categorical_dtype is deprecated and will be removed in a future version. Use isinstance(dtype, pd.CategoricalDtype) instead', DeprecationWarning, stacklevel=2)\n if isinstance(arr_or_dtype, ExtensionDtype):\n return arr_or_dtype.name == 'category'\n if arr_or_dtype is None:\n return False\n return CategoricalDtype.is_dtype(arr_or_dtype)", + "docstring": "Check whether an array-like or dtype is of the Categorical dtype. .. deprecated:: 2.2.0 Use isinstance(dtype, pd.CategoricalDtype) instead. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Categorical dtype. See Also -------- api.types.is_list_like: Check if the object is list-like. api.types.is_complex_dtype: Check whether the provided array or dtype is of a complex dtype. Examples -------- >>> from pandas.api.types import is_categorical_dtype >>> from pandas import CategoricalDtype >>> is_categorical_dtype(object) False >>> is_categorical_dtype(CategoricalDtype()) True >>> is_categorical_dtype([1, 2, 3]) False >>> is_categorical_dtype(pd.Categorical([1, 2, 3])) True >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3])) True", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\common.py", + "ast_data": "FunctionDef name:is_categorical_dtype arg:arr_or_dtype arguments arg Call If Call Return return:yes Compare If Compare Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "OGREnvelope", + "source_code": "class OGREnvelope(Structure):\n _fields_ = [('MinX', c_double), ('MaxX', c_double), ('MinY', c_double), ('MaxY', c_double)]", + "docstring": "Represent the OGREnvelope C Structure.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py", + "ast_data": "ClassDef name:OGREnvelope Assign" + }, + { + "library": "tensorflow", + "name": "_initialize", + "source_code": "def _initialize(self):\n self._event_queue = CloseableQueue(self._max_queue)\n self._worker = _EventLoggerThread(self._event_queue, self._ev_writer, self._flush_secs, self._flush_complete, self._flush_sentinel, self._close_sentinel)\n self._worker.start()", + "docstring": "Initializes or re-initializes the queue and writer thread. 
The EventsWriter itself does not need to be re-initialized explicitly, because it will auto-initialize itself if used after being closed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py", + "ast_data": "FunctionDef name:_initialize arg:self arguments arg Assign Call Assign Call Call" + }, + { + "library": "django", + "name": "boundary", + "source_code": "@property\ndef boundary(self):\n return self._topology(capi.geos_boundary(self.ptr))", + "docstring": "Return the boundary as a newly allocated Geometry object.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:boundary arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_replace_path", + "source_code": "def _replace_path(self, source_class):\n replace_dict = {'_base._AxesBase': 'Axes', '_axes.Axes': 'Axes'}\n for key, value in replace_dict.items():\n source_class = source_class.replace(key, value)\n return source_class", + "docstring": "Changes the full path to the public API path that is used in sphinx. This is needed for links to work.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:_replace_path arg:self arg:source_class arguments arg arg Assign For Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_deduplicate_indexed_slices", + "source_code": "def _deduplicate_indexed_slices(values, indices):\n unique_indices, new_index_positions = array_ops.unique(indices)\n summed_values = math_ops.unsorted_segment_sum(values, new_index_positions, array_ops.shape(unique_indices)[0])\n return (summed_values, unique_indices)", + "docstring": "Sums associated with any non-unique . Args: values: A with rank >= 1. indices: A one-dimensional integer , indexing into the first dimension of (as in an IndexedSlices object). 
Returns: A tuple of (, ) where is a de-duplicated version of and contains the sum of slices associated with each unique index.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_deduplicate_indexed_slices arg:values arg:indices arguments arg arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "should_redirect_with_slash", + "source_code": "def should_redirect_with_slash(self, request):\n if settings.APPEND_SLASH and (not request.path_info.endswith('/')):\n urlconf = getattr(request, 'urlconf', None)\n if not is_valid_path(request.path_info, urlconf):\n match = is_valid_path('%s/' % request.path_info, urlconf)\n if match:\n view = match.func\n return getattr(view, 'should_append_slash', True)\n return False", + "docstring": "Return True if settings.APPEND_SLASH is True and appending a slash to the request path turns an invalid path into a valid one.", + "type": "method", + "file_path": "django\\django\\middleware\\common.py", + "ast_data": "FunctionDef name:should_redirect_with_slash arg:self arg:request arguments arg arg If BoolOp Call Assign Call If Call Assign Call If Assign Return return:yes Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "H", + "source_code": "def H(s):\n return md5_hex(s)", + "docstring": "Return an `` HEX hash.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py", + "ast_data": "FunctionDef name:H arg:s arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "CurrentSiteMiddleware", + "source_code": "class CurrentSiteMiddleware(MiddlewareMixin):\n\n def process_request(self, request):\n request.site = get_current_site(request)", + "docstring": "Middleware that sets attribute to request object.", + "type": "class", + "file_path": "django\\django\\contrib\\sites\\middleware.py", + "ast_data": "ClassDef name:CurrentSiteMiddleware FunctionDef name:process_request arg:self arg:request arguments arg arg Assign Call" + }, + { + "library": "tensorflow", + "name": "enter_scope", + "source_code": "def enter_scope(self, scf_scope=False):\n self.symbols.append({'types': {}, 'symbols': {}})\n self.curr_table = self.symbols[len(self.symbols) - 1]\n if scf_scope:\n self.scf_scope += 1", + "docstring": "Enter a new scope - at function level.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py", + "ast_data": "FunctionDef name:enter_scope arg:self arg:scf_scope arguments arg arg Call Assign Call If" + }, + { + "library": "pandas", + "name": "get_test_result", + "source_code": "def get_test_result() -> list[bool]:\n global _TEST_RESULT\n res = _TEST_RESULT\n _TEST_RESULT = []\n return res", + "docstring": "Get test result and reset test_results.", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\expressions.py", + "ast_data": "FunctionDef name:get_test_result arguments Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "find_in_app", + "source_code": "def find_in_app(self, app, path):\n storage = self.storages.get(app)\n if storage and storage.exists(path):\n matched_path = storage.path(path)\n if matched_path:\n return matched_path", + "docstring": "Find a requested static file in an app's static locations.", + "type": "method", + "file_path": "django\\django\\contrib\\staticfiles\\finders.py", + "ast_data": "FunctionDef name:find_in_app arg:self arg:app arg:path arguments arg arg arg Assign Call 
If BoolOp Call Assign Call If Return return:yes" + }, + { + "library": "tensorflow", + "name": "_print_cache", + "source_code": "def _print_cache():\n replica_str = '%d' % file_index\n if self._parameters.trace_dir:\n output_path = os.path.join(self._parameters.trace_dir, _COMPACT_TRACE_FILE_PREFIX) + replica_str + self._get_outfile_suffix()\n output_stream = _OUTPUT_STREAM_ESCAPE + output_path\n else:\n output_stream = sys.stderr\n new_step_line = _REPLICA_ID_TAG + replica_str\n print_ops = []\n if self._parameters.inspect_trace:\n if self._num_signature_dimensions() > 1:\n raise ValueError('Inspecting multi signatures are not supported.')\n if self._parameters.trace_mode in tensor_tracer_flags.TRACE_MODE_HISTORY:\n print_ops.append(self._inspect_history_cache(cache=cache, replica_id=replica_id, step_num=step_num, tensor_trace_order=tensor_trace_order))\n else:\n print_ops.append(self._inspect_summary_cache(cache=cache, replica_id=replica_id, step_num=step_num, output_stream=output_stream, tensor_trace_order=tensor_trace_order))\n else:\n for i in range(self._num_signature_dimensions()):\n print_ops.append(logging_ops.print_v2(new_step_line, '\\n', cache[:, i], '\\n', summarize=-1, output_stream=output_stream))\n with ops.control_dependencies(print_ops):\n return constant_op.constant(0).op", + "docstring": "Flushes the cache to a file.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_print_cache arguments Assign If Assign Call Call Assign Assign Assign Assign If If Compare Call Raise Call If Compare Call Call Call Call For Call Call Call Call With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "compute_mean_iou", + "source_code": "def compute_mean_iou(_, total_cm):\n sum_over_row = math_ops.cast(math_ops.reduce_sum(total_cm, 0), dtypes.float32)\n sum_over_col = math_ops.cast(math_ops.reduce_sum(total_cm, 1), dtypes.float32)\n cm_diag = math_ops.cast(array_ops.diag_part(total_cm), dtypes.float32)\n denominator = sum_over_row + sum_over_col - cm_diag\n num_valid_entries = math_ops.reduce_sum(math_ops.cast(math_ops.not_equal(denominator, 0), dtype=dtypes.float32))\n denominator = array_ops.where(math_ops.greater(denominator, 0), denominator, array_ops.ones_like(denominator))\n iou = math_ops.divide(cm_diag, denominator)\n result = array_ops.where(math_ops.greater(num_valid_entries, 0), math_ops.reduce_sum(iou, name='mean_iou') / num_valid_entries, 0)\n return result", + "docstring": "Compute the mean intersection-over-union via the confusion matrix.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:compute_mean_iou arg:_ arg:total_cm arguments arg arg Assign Call Call Assign Call Call Assign Call Call Assign Assign Call Call Call Assign Call Call Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "end", + "source_code": "def end(self, tag=None, indent=True):\n if tag:\n assert self.__tags, f'unbalanced end({tag})'\n assert _escape_cdata(tag) == self.__tags[-1], f'expected end({self.__tags[-1]}), got {tag}'\n else:\n assert self.__tags, 'unbalanced end()'\n tag = self.__tags.pop()\n if self.__data:\n self.__flush(indent)\n elif self.__open:\n self.__open = 0\n self.__write('/>\\n')\n return\n if indent:\n self.__write(self.__indentation[:len(self.__tags)])\n self.__write(f'\\n')", + "docstring": "Close the current element (opened by the most recent call to 
:meth:). Parameters ---------- tag Element tag. If given, the tag must match the start tag. If omitted, the current element is closed. indent : bool, default: True", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py", + "ast_data": "FunctionDef name:end arg:self arg:tag arg:indent arguments arg arg arg If Compare Call Assign Call If Call If Assign Call Return return:no If Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_saveable", + "source_code": "def get_saveable(self, var, primary_var, name):\n return values_util.get_on_read_saveable(var, primary_var, name)", + "docstring": "Create a saveable object for the given variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:get_saveable arg:self arg:var arg:primary_var arg:name arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "record_applied", + "source_code": "def record_applied(self, app, name):\n self.ensure_schema()\n self.migration_qs.create(app=app, name=name)", + "docstring": "Record that a migration was applied.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\recorder.py", + "ast_data": "FunctionDef name:record_applied arg:self arg:app arg:name arguments arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "OpStrategy", + "source_code": "class OpStrategy(StrategyType):\n\n def __init__(self, strategies: list[PlacementStrategy]) -> None:\n super().__init__()\n self.strategies: list[PlacementStrategy] = strategies\n\n def __str__(self) -> str:\n strategy_list_str = ', '.join([str(strategy) for strategy in self.strategies])\n mesh_shape = self.mesh_shape\n return f'[{strategy_list_str}] @ mesh: {mesh_shape}'\n\n def max_num_shards(self) -> int:\n return max((strategy.output_spec.num_shards for strategy in self.strategies))\n\n @property\n def mesh(self):\n return self.strategies[0].mesh\n\n @property\n def mesh_shape(self):\n return self.strategies[0].mesh.shape\n\n @property\n def ndim(self):\n return self.strategies[0].output_spec.ndim\n\n @property\n def shape(self):\n return self.strategies[0].output_spec.shape", + "docstring": "OpStrategy that consists of a list of placement strategies associated with the op", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py", + "ast_data": "ClassDef name:OpStrategy FunctionDef name:__init__ arg:self arg:strategies arguments arg arg Call Call FunctionDef name:__str__ arg:self arguments arg Assign Call Call Assign Return return:yes FunctionDef name:max_num_shards arg:self arguments arg Return return:yes Call FunctionDef name:mesh arg:self arguments arg Return return:yes FunctionDef name:mesh_shape arg:self arguments arg Return return:yes FunctionDef name:ndim arg:self arguments arg Return return:yes FunctionDef name:shape arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "register", + "source_code": "def register(self, name, overwrite=False, **kwargs):\n self._registry[name] = (overwrite, kwargs)\n return self.create_client(name)", + "docstring": "Registers a new remote application. :param name: Name of the remote application. :param overwrite: Overwrite existing config with framework settings. :param kwargs: Parameters for :class:. Find parameters for the given remote app class. When a remote app is registered, it can be accessed with *named* attribute:: oauth.register('twitter', client_id='', ...) 
oauth.twitter.get('timeline')", + "type": "method", + "file_path": "authlib\\authlib\\integrations\\base_client\\registry.py", + "ast_data": "FunctionDef name:register arg:self arg:name arg:overwrite arguments arg arg arg arg Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_antialiased", + "source_code": "def set_antialiased(self, aa):\n if aa is None:\n aa = self._get_default_antialiased()\n self._antialiaseds = np.atleast_1d(np.asarray(aa, bool))\n self.stale = True", + "docstring": "Set the antialiasing state for rendering. Parameters ---------- aa : bool or list of bools", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:set_antialiased arg:self arg:aa arguments arg arg If Compare Assign Call Assign Call Call Assign" + }, + { + "library": "pytorch", + "name": "scale_grads", + "source_code": "def scale_grads(self, grad_scale_factor: int) -> None:\n if grad_scale_factor != 1:\n for p in self.submod.parameters():\n if p.grad is not None:\n p.grad.div_(grad_scale_factor)", + "docstring": "Scale gradients model gradients by , which should be specified in coordination with the loss function used with pipelining. For loss functions which perform 'mean' loss reduction, should be set to num_microbatches. For loss functions that use reduction, should be set to 1. Should only be called once per pipeline schedule step, after all backwards passes have completed.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", + "ast_data": "FunctionDef name:scale_grads arg:self arg:grad_scale_factor arguments arg arg If Compare For Call If Compare Call" + }, + { + "library": "seaborn", + "name": "theme", + "source_code": "@property\ndef theme(self) -> dict[str, Any]:\n return self._theme", + "docstring": "Dictionary of base theme parameters for :class:. Keys and values correspond to matplotlib rc params, as documented here:", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\plot.py", + "ast_data": "FunctionDef name:theme arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "kurt", + "source_code": "def kurt(self, skipna: bool=True, numeric_only: bool=False, **kwargs) -> DataFrame:\n return self._cython_agg_general('kurt', alt=None, skipna=skipna, numeric_only=numeric_only, **kwargs)", + "docstring": "Return unbiased kurtosis within groups. Parameters ---------- skipna : bool, default True Exclude NA/null values when computing the result. numeric_only : bool, default False Include only float, int, boolean columns. **kwargs Additional keyword arguments to be passed to the function. Returns ------- DataFrame Unbiased kurtosis within groups. See Also -------- DataFrame.kurt : Return unbiased kurtosis over requested axis. Examples -------- >>> arrays = [ ... [ ... \"falcon\", ... \"parrot\", ... \"cockatoo\", ... \"kiwi\", ... \"eagle\", ... \"lion\", ... \"monkey\", ... \"rabbit\", ... \"dog\", ... \"wolf\", ... ], ... [ ... \"bird\", ... \"bird\", ... \"bird\", ... \"bird\", ... \"bird\", ... \"mammal\", ... \"mammal\", ... \"mammal\", ... \"mammal\", ... \"mammal\", ... ], ... ] >>> index = pd.MultiIndex.from_arrays(arrays, names=(\"name\", \"class\")) >>> df = pd.DataFrame( ... { ... \"max_speed\": [ ... 389.0, ... 24.0, ... 70.0, ... np.nan, ... 350.0, ... 80.5, ... 21.5, ... 15.0, ... 40.0, ... 50.0, ... ] ... }, ... index=index, ... 
) >>> df max_speed name class falcon bird 389.0 parrot bird 24.0 cockatoo bird 70.0 kiwi bird NaN eagle bird 350.0 lion mammal 80.5 monkey mammal 21.5 rabbit mammal 15.0 dog mammal 40.0 wolf mammal 50.0 >>> gb = df.groupby([\"class\"]) >>> gb.kurt() max_speed class bird -5.493277 mammal 0.204125 >>> gb.kurt(skipna=False) max_speed class bird NaN mammal 0.204125", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\generic.py", + "ast_data": "FunctionDef name:kurt arg:self arg:skipna arg:numeric_only arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_split_cluster_for_evaluator", + "source_code": "def _split_cluster_for_evaluator(cluster_spec, task_type):\n new_cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()\n if task_type == _TaskType.EVALUATOR:\n assert _TaskType.EVALUATOR in new_cluster_spec\n new_cluster_spec = {_TaskType.EVALUATOR: new_cluster_spec[_TaskType.EVALUATOR]}\n else:\n new_cluster_spec.pop(_TaskType.EVALUATOR, None)\n return normalize_cluster_spec(new_cluster_spec)", + "docstring": "Split the cluster for evaluator since it needn't talk to other tasks.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py", + "ast_data": "FunctionDef name:_split_cluster_for_evaluator arg:cluster_spec arg:task_type arguments arg arg Assign Call Call If Compare Compare Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "Problem21", + "source_code": "class Problem21(Benchmark):\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n self._bounds = [(0, 10)]\n self.global_optimum = 4.79507\n self.fglob = -9.50835\n\n def fun(self, x, *args):\n self.nfev += 1\n x = x[0]\n return x * sin(x) + x * cos(2.0 * x)", + "docstring": "Univariate Problem21 objective function. This class defines the Univariate Problem21 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem21}}(x) = x \\sin(x) + x \\cos(2x) Bound constraints: :math: .. figure:: figures/Problem21.png :alt: Univariate Problem21 function :align: center **Univariate Problem21 function** *Global optimum*: :math: for :math:", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", + "ast_data": "ClassDef name:Problem21 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n router = MetadataRouter(owner=self.__class__.__name__).add(regressor=self._get_regressor(), method_mapping=MethodMapping().add(caller='fit', callee='fit').add(caller='predict', callee='predict'))\n return router", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. 
versionadded:: 1.6 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\compose\\_target.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_tensor_shape_match", + "source_code": "def _is_tensor_shape_match(self, shape_a: TensorShape, shape_b: TensorShape) -> bool:\n for s_a, s_b in zip(shape_a.as_list(), shape_b.as_list()):\n if s_a and s_b and (s_a != s_b):\n return False\n return True", + "docstring": "Check if shape b matches with shape a.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py", + "ast_data": "FunctionDef name:_is_tensor_shape_match arg:self arg:shape_a arg:shape_b arguments arg arg arg For Call Call Call If BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "gather_nd", + "source_code": "def gather_nd(self, indices, name=None):\n val = self._variable.gather_nd(indices, name=name)\n return math_ops.cast(val, self._cast_dtype)", + "docstring": "Gather slices of the variable into a Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py", + "ast_data": "FunctionDef name:gather_nd arg:self arg:indices arg:name arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "sorted_indices", + "source_code": "def sorted_indices(self):\n A = self.copy()\n A.sort_indices()\n return A", + "docstring": "Return a copy of this array/matrix with sorted indices", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_compressed.py", + "ast_data": "FunctionDef name:sorted_indices arg:self arguments arg Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "recursive_undo", + "source_code": "def recursive_undo(self, sched=None):\n scheds = self if sched is None else sched\n if hasattr(scheds, '_schedulers'):\n for s in scheds._schedulers:\n self.recursive_undo(s)\n elif hasattr(scheds, 'last_epoch'):\n scheds.last_epoch -= 1", + "docstring": "Recursively undo any step performed by the initialisation of schedulers.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:recursive_undo arg:self arg:sched arguments arg arg Assign Compare If Call For Call If Call" + }, + { + "library": "tensorflow", + "name": "_verify_tf_condition", + "source_code": "def _verify_tf_condition(cond, tag):\n extra_hint = 'to check for None, use `is not None`'\n cond = tensor_conversion.convert_to_tensor_v2(cond)\n if cond.dtype != dtypes.bool:\n raise ValueError('condition of {} expected to be `tf.bool` scalar, got {}; to use as boolean Tensor, use `tf.cast`; {}'.format(tag, cond, extra_hint))\n if cond.shape is None or cond.shape.ndims is None:\n cond = array_ops.reshape(cond, ())\n elif cond.shape.ndims > 0:\n known_dims = [d for d in cond.shape.as_list() if d is not None]\n if np.prod(known_dims) > 1:\n raise ValueError('condition of {} expected to be `tf.bool` scalar, got {}; {}'.format(tag, cond, extra_hint))\n else:\n cond = array_ops.reshape(cond, ())\n return cond", + "docstring": "Ensures that the condition can be used in a TF control flow.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:_verify_tf_condition 
arg:cond arg:tag arguments arg arg Assign Assign Call If Compare Raise Call Call If BoolOp Compare Compare Assign Call If Compare Assign Call Compare If Compare Call Raise Call Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "kml", + "source_code": "@property\ndef kml(self):\n if self.hasz:\n substr = '%s,%s,%s '\n else:\n substr = '%s,%s,0 '\n return '%s' % ''.join((substr % self[i] for i in range(len(self)))).strip()", + "docstring": "Return the KML representation for the coordinates.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py", + "ast_data": "FunctionDef name:kml arg:self arguments arg If Assign Assign Return return:yes Call Call Call Call" + }, + { + "library": "pytorch", + "name": "soft_margin_loss", + "source_code": "def soft_margin_loss(input: Tensor, target: Tensor, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n if has_torch_function_variadic(input, target):\n return handle_torch_function(soft_margin_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction)\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n return torch._C._nn.soft_margin_loss(input, target, reduction_enum)", + "docstring": "Compute the soft margin loss. See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. 
Returns: Tensor: Soft margin loss.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:soft_margin_loss arg:input arg:target arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_sparse_tensors", + "source_code": "@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION)\ndef _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None):\n del weight_collections\n del trainable\n return CategoricalColumn.IdWeightPair(inputs.get(self), None)", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:_get_sparse_tensors arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "construct_lcb_delaunay", + "source_code": "def construct_lcb_delaunay(self, v_min, ind=None):\n cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds]\n return cbounds", + "docstring": "Construct locally (approximately) convex bounds Parameters ---------- v_min : Vertex object The minimizer vertex Returns ------- cbounds : list of lists List of size dimension with length-2 list of bounds for each dimension.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_shgo.py", + "ast_data": "FunctionDef name:construct_lcb_delaunay arg:self arg:v_min arg:ind arguments arg arg arg Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "bench_scikit_tree_regressor", + "source_code": "def bench_scikit_tree_regressor(X, Y):\n from sklearn.tree import DecisionTreeRegressor\n gc.collect()\n tstart = datetime.now()\n clf = DecisionTreeRegressor()\n clf.fit(X, Y).predict(X)\n delta = datetime.now() - tstart\n scikit_regressor_results.append(delta.seconds + delta.microseconds / mu_second)", + "docstring": "Benchmark with scikit-learn decision tree regressor", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_tree.py", + "ast_data": "FunctionDef name:bench_scikit_tree_regressor arg:X arg:Y arguments arg arg Call Assign Call Assign Call Call Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "stop", + "source_code": "def stop(self) -> None:\n logger.info('EtcdServer stop method called')\n stop_etcd(self._etcd_proc, self._base_data_dir)", + "docstring": "Stop the server and cleans up auto generated resources (e.g. 
data dir).", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_server.py", + "ast_data": "FunctionDef name:stop arg:self arguments arg Call Call" + }, + { + "library": "tensorflow", + "name": "reduce_std", + "source_code": "@dispatch.dispatch_for_api(math_ops.reduce_std)\ndef reduce_std(input_tensor: ragged_tensor.Ragged, axis=None, keepdims=False, name=None):\n with ops.name_scope(name, 'RaggedReduceStd', [input_tensor, axis]):\n variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)\n return math_ops.sqrt(variance)", + "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py", + "ast_data": "FunctionDef name:reduce_std arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg With Call Assign Call Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "set_xlabels", + "source_code": "def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel('')\n return self", + "docstring": "Label the x axis on the bottom row of the grid.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:set_xlabels arg:self arg:label arg:clear_inner arguments arg arg arg arg If Compare Assign For Call If For Call Return return:yes" + }, + { + "library": "kornia", + "name": "transform_boxes", + "source_code": "def transform_boxes(self, input: Union[Tensor, Boxes], params: List[ParamItem], extra_args: Optional[Dict[str, Any]]=None) -> Union[Tensor, Boxes]:\n if isinstance(input, Tensor):\n batchsize, frame_num = (input.size(0), input.size(1))\n input = Boxes.from_tensor(input.view(-1, input.size(2), input.size(3), input.size(4)), mode='vertices_plus')\n input = super().transform_boxes(input, params, extra_args=extra_args)\n input = input.data.view(batchsize, frame_num, -1, 4, 2)\n else:\n input = super().transform_boxes(input, params, extra_args=extra_args)\n return input", + "docstring": "Transform bounding boxes. Args: input: tensor with shape :math:. If input is a type, the internal shape is :math:. params: params for the sequence. extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\video.py", + "ast_data": "FunctionDef name:transform_boxes arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Call Assign Call Call Assign Call Call Call Call Call Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "antiderivative", + "source_code": "def antiderivative(self, nu=1):\n if nu <= 0:\n return self.derivative(-nu)\n if nu > 1:\n bp = self\n for k in range(nu):\n bp = bp.antiderivative()\n return bp\n c, x = (self.c, self.x)\n k = c.shape[0]\n c2 = np.zeros((k + 1,) + c.shape[1:], dtype=c.dtype)\n c2[1:, ...] 
= np.cumsum(c, axis=0) / k\n delta = x[1:] - x[:-1]\n c2 *= delta[(None, slice(None)) + (None,) * (c.ndim - 2)]\n c2[:, 1:] += np.cumsum(c2[k, :], axis=0)[:-1]\n if self.extrapolate == 'periodic':\n extrapolate = False\n else:\n extrapolate = self.extrapolate\n return self.construct_fast(c2, x, extrapolate, axis=self.axis)", + "docstring": "Construct a new piecewise polynomial representing the antiderivative. Parameters ---------- nu : int, optional Order of antiderivative to evaluate. Default is 1, i.e., compute the first integral. If negative, the derivative is returned. Returns ------- bp : BPoly Piecewise polynomial of order k + nu representing the antiderivative of this polynomial. Notes ----- If antiderivative is computed and ``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", + "ast_data": "FunctionDef name:antiderivative arg:self arg:nu arguments arg arg If Compare Return return:yes Call If Compare Assign For Call Assign Call Return return:yes Assign Assign Assign Call Assign Call Assign Call Call If Compare Assign Assign Return return:yes Call" + }, + { + "library": "django", + "name": "regex_lookup", + "source_code": "def regex_lookup(self, lookup_type):\n raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')", + "docstring": "Return the string to use in a query when performing regular expression lookups (using \"regex\" or \"iregex\"). It should contain a '%s' placeholder for the column being searched against. If the feature is not supported (or part of it is not supported), raise NotImplementedError.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:regex_lookup arg:self arg:lookup_type arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "get_gencode_flags", + "source_code": "def get_gencode_flags() -> str:\n arch_list = get_arch_list()\n if len(arch_list) == 0:\n return ''\n arch_list_ = [arch.split('_') for arch in arch_list]\n return ' '.join([f'-gencode compute=compute_{arch},code={kind}_{arch}' for kind, arch in arch_list_])", + "docstring": "Return NVCC gencode flags this library was compiled with.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:get_gencode_flags arguments Assign Call If Compare Call Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_references", + "source_code": "def get_references(state, model_tuple, field_tuple=()):\n for state_model_tuple, model_state in state.models.items():\n for name, field in model_state.fields.items():\n reference = field_references(state_model_tuple, field, model_tuple, *field_tuple)\n if reference:\n yield (model_state, name, field, reference)", + "docstring": "Generator of (model_state, name, field, reference) referencing provided context. 
If field_tuple is provided only references to this particular field of model_tuple will be generated.", + "type": "function", + "file_path": "django\\django\\db\\migrations\\utils.py", + "ast_data": "FunctionDef name:get_references arg:state arg:model_tuple arg:field_tuple arguments arg arg arg For Call For Call Assign Call If" + }, + { + "library": "scikit-learn", + "name": "_validate_n_bins", + "source_code": "def _validate_n_bins(self, n_features):\n orig_bins = self.n_bins\n if isinstance(orig_bins, Integral):\n return np.full(n_features, orig_bins, dtype=int)\n n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False)\n if n_bins.ndim > 1 or n_bins.shape[0] != n_features:\n raise ValueError('n_bins must be a scalar or array of shape (n_features,).')\n bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)\n violating_indices = np.where(bad_nbins_value)[0]\n if violating_indices.shape[0] > 0:\n indices = ', '.join((str(i) for i in violating_indices))\n raise ValueError('{} received an invalid number of bins at indices {}. Number of bins must be at least 2, and must be an int.'.format(KBinsDiscretizer.__name__, indices))\n return n_bins", + "docstring": "Returns n_bins_, the number of bins per feature.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_discretization.py", + "ast_data": "FunctionDef name:_validate_n_bins arg:self arg:n_features arguments arg arg Assign If Call Return return:yes Call Assign Call If BoolOp Compare Compare Raise Call Assign Compare Compare Assign Call If Compare Assign Call Call Raise Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "bind_symbols", + "source_code": "def bind_symbols(self, placeholders: Sequence[FakeTensor], args: Sequence[Tensor]) -> dict[sympy.Symbol, int]:\n bindings: dict[sympy.Symbol, int] = {}\n\n def bind_symint(arg: object, val: object) -> None:\n if isinstance(val, SymInt):\n assert isinstance(arg, int)\n s = val.node.expr\n if isinstance(s, sympy.Symbol):\n if s in bindings:\n assert bindings[s] == arg, f'{bindings[s]} != {arg}'\n else:\n bindings[s] = arg\n elif isinstance(-s, sympy.Symbol):\n if -s in bindings:\n assert bindings[-s] == -arg, f'{bindings[-s]} != {-arg}'\n else:\n bindings[-s] = -arg\n for t, arg in zip(placeholders, args):\n if t is None:\n continue\n if isinstance(t, SymInt):\n bind_symint(arg, t)\n continue\n assert isinstance(t, torch.Tensor)\n for i, s in enumerate(t.size()):\n bind_symint(arg.size(i), s)\n for i, s in enumerate(t.stride()):\n bind_symint(arg.stride(i), s)\n bind_symint(arg.storage_offset(), t.storage_offset())\n return bindings", + "docstring": "Given a paired list of placeholders (fake tensors with symbolic sizes) and concrete arguments (regular tensors with real sizes), returns a dictionary mapping each symbol to its real value. So for example, if you have a placeholder with size (s0, s1), binding (2, 4) to it will give you {s0: 2, s1: 4}. This is not guaranteed to bind ALL symbols in the ShapeEnv; we can't bind a symbol if it doesn't occur in any placeholder, and symbols that already have replacements won't get bindings. This is a little duplicative with evaluate_guards but it's different enough that it seemed cleanest to make another copy. 
This assumes the guards are already checked, though if it's cheap we'll check for shenanigans", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:bind_symbols arg:self arg:placeholders arg:args arguments arg arg arg FunctionDef name:bind_symint arg:arg arg:val arguments arg arg If Call Call Assign If Call If Compare Compare Assign If Call If Compare Compare Assign For Call If Compare If Call Call Call For Call Call Call Call For Call Call Call Call Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "fill_value", + "source_code": "@property\ndef fill_value(self):\n if self._fill_value is None:\n self._fill_value = _check_fill_value(None, self.dtype)\n if isinstance(self._fill_value, ndarray):\n return self._fill_value[()]\n return self._fill_value", + "docstring": "The filling value of the masked array is a scalar. When setting, None will set to a default based on the data type. Examples -------- >>> import numpy as np >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: ... np.ma.array([0, 1], dtype=dt).get_fill_value() ... np.int64(999999) np.int64(999999) np.float64(1e+20) np.complex128(1e+20+0j) >>> x = np.ma.array([0, 1.], fill_value=-np.inf) >>> x.fill_value np.float64(-inf) >>> x.fill_value = np.pi >>> x.fill_value np.float64(3.1415926535897931) Reset to default: >>> x.fill_value = None >>> x.fill_value np.float64(1e+20)", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:fill_value arg:self arguments arg If Compare Assign Call If Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "take_nd", + "source_code": "def take_nd(arr: ArrayLike, indexer, axis: AxisInt=0, fill_value=lib.no_default, allow_fill: bool=True) -> ArrayLike:\n if fill_value is lib.no_default:\n fill_value = na_value_for_dtype(arr.dtype, compat=False)\n elif lib.is_np_dtype(arr.dtype, 'mM'):\n dtype, fill_value = maybe_promote(arr.dtype, fill_value)\n if arr.dtype != dtype:\n arr = arr.astype(dtype)\n if not isinstance(arr, np.ndarray):\n if not is_1d_only_ea_dtype(arr.dtype):\n arr = cast('NDArrayBackedExtensionArray', arr)\n return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis)\n return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)\n arr = np.asarray(arr)\n return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)", + "docstring": "Specialized Cython take which sets NaN values in one pass This dispatches to `` defined on ExtensionArrays. Note: this function assumes that the indexer is a valid(ated) indexer with no out of bound indices. Parameters ---------- arr : np.ndarray or ExtensionArray Input array. indexer : ndarray 1-D array of indices to take, subarrays corresponding to -1 value indices are filed with fill_value axis : int, default 0 Axis to take from fill_value : any, default np.nan Fill value to replace -1 values with allow_fill : bool, default True If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. 
Returns ------- subarray : np.ndarray or ExtensionArray May be the same type as the input, or cast to an ndarray.", + "type": "function", + "file_path": "pandas\\pandas\\core\\array_algos\\take.py", + "ast_data": "FunctionDef name:take_nd arg:arr arg:indexer arg:axis arg:fill_value arg:allow_fill arguments arg arg arg arg arg If Compare Assign Call If Call Assign Call If Compare Assign Call If Call If Call Assign Call Return return:yes Call Return return:yes Call Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "apply_non_transform_box", + "source_code": "def apply_non_transform_box(self, input: Boxes, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Boxes:\n return input", + "docstring": "Process boxes corresponding to the inputs that are no transformation applied.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py", + "ast_data": "FunctionDef name:apply_non_transform_box arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_fw_post_hook", + "source_code": "def _fw_post_hook(self, mod, input, output):\n super()._fw_post_hook(mod, input, output)", + "docstring": "This function is called when the forward pass of a module is called. It updates the module tracker and removes the module from parent data", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py", + "ast_data": "FunctionDef name:_fw_post_hook arg:self arg:mod arg:input arg:output arguments arg arg arg arg Call Call" + }, + { + "library": "matplotlib", + "name": "_interval_contains_close", + "source_code": "def _interval_contains_close(interval, val, rtol=1e-10):\n a, b = interval\n if a > b:\n a, b = (b, a)\n rtol = (b - a) * rtol\n return a - rtol <= val <= b + rtol", + "docstring": "Check, inclusively, whether an interval includes a given value, with the interval expanded by a small tolerance to admit floating point errors. Parameters ---------- interval : (float, float) The endpoints of the interval. val : float Value to check is within interval. rtol : float, default: 1e-10 Relative tolerance slippage allowed outside of the interval. For an interval `` are considered inside the interval. Returns ------- bool Whether *val* is within the *interval* (with tolerance).", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:_interval_contains_close arg:interval arg:val arg:rtol arguments arg arg arg Assign If Compare Assign Assign Return return:yes Compare" + }, + { + "library": "scikit-learn", + "name": "expand_dims", + "source_code": "def expand_dims(a: Array, /, *, axis: int | tuple[int, ...]=(0,), xp: ModuleType | None=None) -> Array:\n if xp is None:\n xp = array_namespace(a)\n if not isinstance(axis, tuple):\n axis = (axis,)\n ndim = a.ndim + len(axis)\n if axis != () and (min(axis) < -ndim or max(axis) >= ndim):\n err_msg = f'a provided axis position is out of bounds for array of dimension {a.ndim}'\n raise IndexError(err_msg)\n axis = tuple((dim % ndim for dim in axis))\n if len(set(axis)) != len(axis):\n err_msg = 'Duplicate dimensions specified in `axis`.'\n raise ValueError(err_msg)\n for i in sorted(axis):\n a = xp.expand_dims(a, axis=i)\n return a", + "docstring": "Expand the shape of an array. Insert (a) new axis/axes that will appear at the position(s) specified by in the expanded array shape. 
This is `axisaa` may also be a tuple: >>> y = xpx.expand_dims(x, axis=(0, 1), xp=xp) >>> y Array([[[1, 2]]], dtype=array_api_strict.int64) >>> y = xpx.expand_dims(x, axis=(2, 0), xp=xp) >>> y Array([[[1], [2]]], dtype=array_api_strict.int64)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py", + "ast_data": "FunctionDef name:expand_dims arguments arg arg arg If Compare Assign Call If Call Assign Assign Call If BoolOp Compare BoolOp Compare Call Compare Call Assign Raise Call Assign Call If Compare Call Call Call Assign Raise Call For Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "ExtremeFinderSimple", + "source_code": "class ExtremeFinderSimple:\n\n def __init__(self, nx, ny):\n self.nx = nx\n self.ny = ny\n\n def __call__(self, transform_xy, x1, y1, x2, y2):\n tbbox = self._find_transformed_bbox(_User2DTransform(transform_xy, None), Bbox.from_extents(x1, y1, x2, y2))\n return (tbbox.x0, tbbox.x1, tbbox.y0, tbbox.y1)\n\n def _find_transformed_bbox(self, trans, bbox):\n grid = np.reshape(np.meshgrid(np.linspace(bbox.x0, bbox.x1, self.nx), np.linspace(bbox.y0, bbox.y1, self.ny)), (2, -1)).T\n tbbox = Bbox.null()\n tbbox.update_from_data_xy(trans.transform(grid))\n return tbbox.expanded(1 + 2 / self.nx, 1 + 2 / self.ny)", + "docstring": "A helper class to figure out the range of grid lines that need to be drawn.", + "type": "class", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py", + "ast_data": "ClassDef name:ExtremeFinderSimple FunctionDef name:__init__ arg:self arg:nx arg:ny arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arg:transform_xy arg:x1 arg:y1 arg:x2 arg:y2 arguments arg arg arg arg arg arg Assign Call Call Call Return return:yes FunctionDef name:_find_transformed_bbox arg:self arg:trans arg:bbox arguments arg arg arg Assign Call Call Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "database_forwards", + "source_code": "def database_forwards(self, app_label, schema_editor, from_state, to_state):\n raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')", + "docstring": "Perform the mutation on the database schema in the normal (forwards) direction.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\operations\\base.py", + "ast_data": "FunctionDef name:database_forwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg Raise Call" + }, + { + "library": "pandas", + "name": "find_common_type", + "source_code": "def find_common_type(types):\n if not types:\n raise ValueError('no types given')\n first = types[0]\n if lib.dtypes_all_equal(list(types)):\n return first\n types = list(dict.fromkeys(types).keys())\n if any((isinstance(t, ExtensionDtype) for t in types)):\n for t in types:\n if isinstance(t, ExtensionDtype):\n res = t._get_common_dtype(types)\n if res is not None:\n return res\n return np.dtype('object')\n if all((lib.is_np_dtype(t, 'M') for t in types)):\n return np.dtype(max(types))\n if all((lib.is_np_dtype(t, 'm') for t in types)):\n return np.dtype(max(types))\n has_bools = any((t.kind == 'b' for t in types))\n if has_bools:\n for t in types:\n if t.kind in 'iufc':\n return np.dtype('object')\n return np_find_common_type(*types)", + "docstring": "Find a common data type among the given dtypes. 
Parameters ---------- types : list of dtypes Returns ------- pandas extension or numpy dtype See Also -------- numpy.find_common_type", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\cast.py", + "ast_data": "FunctionDef name:find_common_type arg:types arguments arg If Raise Call Assign If Call Call Return return:yes Assign Call Call Call If Call Call For If Call Assign Call If Compare Return return:yes Return return:yes Call If Call Call Return return:yes Call Call If Call Call Return return:yes Call Call Assign Call Compare If For If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "BaseArchiveIndexView", + "source_code": "class BaseArchiveIndexView(BaseDateListView):\n context_object_name = 'latest'\n\n def get_dated_items(self):\n qs = self.get_dated_queryset()\n date_list = self.get_date_list(qs, ordering='DESC')\n if not date_list:\n qs = qs.none()\n return (date_list, qs, {})", + "docstring": "Base view for archives of date-based items. This requires subclassing to provide a response mixin.", + "type": "class", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "ClassDef name:BaseArchiveIndexView Assign FunctionDef name:get_dated_items arg:self arguments arg Assign Call Assign Call If Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "prune_unconnected_ops_from_xla", + "source_code": "def prune_unconnected_ops_from_xla(prune_graph: ops.Graph):\n for graph in [prune_graph] + [f for f in prune_graph._functions.values()]:\n if not isinstance(graph, ops.Graph):\n continue\n for op in graph.get_operations():\n if op.type not in _UNCONNECTED_OPS_TO_PRUNE:\n continue\n outputs_consumed = False\n for output in op.outputs:\n if output.consumers():\n outputs_consumed = True\n break\n if not outputs_consumed:\n logging.info('Pruning OP %s of type %s from XLA Compile due to it being disconnected.', op.name, op.type)\n op._clear_attr(tpu_replication._TPU_REPLICATE_ATTR)", + "docstring": "Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. Args: prune_graph: A tensorflow graph from which we wish to prune unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should have no inputs and no consumers. These can often be left behind due to graph construction rewiring (for instance TF-Hub). While they never execute, they will cause XLA compile to fail so we strip them from XLA compile by removing the tpu_replicate attribute.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py", + "ast_data": "FunctionDef name:prune_unconnected_ops_from_xla arg:prune_graph arguments arg For Call If Call For Call If Compare Assign For If Call Assign If Call Call" + }, + { + "library": "tensorflow", + "name": "AggregationMethod", + "source_code": "@tf_export('AggregationMethod')\nclass AggregationMethod:\n ADD_N = 0\n DEFAULT = ADD_N\n EXPERIMENTAL_TREE = 1\n EXPERIMENTAL_ACCUMULATE_N = 2", + "docstring": "A class listing aggregation methods used to combine gradients. Computing partial derivatives can require aggregating gradient contributions. This class lists the various methods that can be used to combine gradients in the graph. The following aggregation methods are part of the stable API for aggregating gradients: * : All of the gradient terms are summed as part of one operation using the \"AddN\" op (see ). This method has the property that all gradients must be ready and buffered separately in memory before any aggregation is performed. 
* : The system-chosen default aggregation method. The following aggregation methods are experimental and may not be supported in future releases: * : Gradient terms are summed in pairs using the \"AddN\" op. This method of summing gradients may reduce performance, but it can improve memory utilization because the gradients can be released earlier. * : Same as . Example usage when computing gradient: >>> @tf.function ... def example(): ... x = tf.constant(1.0) ... y = x * 2.0 ... z = y + y + y + y ... return tf.gradients(z, [x, y], ... aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) >>> example() [, ]", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py", + "ast_data": "ClassDef name:AggregationMethod Assign Assign Assign Assign Call" + }, + { + "library": "seaborn", + "name": "position_candidates", + "source_code": "def position_candidates(self, xyr_i, neighbors):\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = ((x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i))\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)", + "docstring": "Return a list of coordinates that might be valid by adjusting x.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:position_candidates arg:self arg:xyr_i arg:neighbors arguments arg arg arg Assign Assign Assign For Assign Assign Call Call Assign If Assign Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_output_details", + "source_code": "def get_output_details(self):\n return [self._get_tensor_details(i, subgraph_index=0) for i in self._interpreter.OutputIndices()]", + "docstring": "Gets model output tensor details. Returns: A list in which each item is a dictionary with details about an output tensor. The dictionary contains the same fields as described for .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", + "ast_data": "FunctionDef name:get_output_details arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "nanstd", + "source_code": "@bottleneck_switch(ddof=1)\ndef nanstd(values, *, axis: AxisInt | None=None, skipna: bool=True, ddof: int=1, mask=None):\n if values.dtype == 'M8[ns]':\n values = values.view('m8[ns]')\n orig_dtype = values.dtype\n values, mask = _get_values(values, skipna, mask=mask)\n result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask))\n return _wrap_results(result, orig_dtype)", + "docstring": "Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. 
Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s.values) 1.0", + "type": "function", + "file_path": "pandas\\pandas\\core\\nanops.py", + "ast_data": "FunctionDef name:nanstd arg:values arguments arg arg arg arg arg If Compare Assign Call Assign Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "synchronize", + "source_code": "def synchronize(device: Optional[_device_t]=None) -> None:\n with torch.mtia.device(device):\n return torch._C._mtia_deviceSynchronize()", + "docstring": "Waits for all jobs in all streams on a MTIA device to complete.", + "type": "function", + "file_path": "pytorch\\torch\\mtia\\__init__.py", + "ast_data": "FunctionDef name:synchronize arg:device arguments arg With Call Return return:yes Call" + }, + { + "library": "authlib", + "name": "parse_implicit_response", + "source_code": "def parse_implicit_response(uri, state=None):\n fragment = urlparse.urlparse(uri).fragment\n params = dict(urlparse.parse_qsl(fragment, keep_blank_values=True))\n if 'access_token' not in params:\n raise MissingTokenException()\n if 'token_type' not in params:\n raise MissingTokenTypeException()\n if state and params.get('state', None) != state:\n raise MismatchingStateException()\n return params", + "docstring": "Parse the implicit token response URI into a dict. If the resource owner grants the access request, the authorization server issues an access token and delivers it to the client by adding the following parameters to the fragment component of the redirection URI using the `` format: **access_token** REQUIRED. The access token issued by the authorization server. **token_type** REQUIRED. The type of the token issued as described in Section 7.1. Value is case insensitive. **expires_in** RECOMMENDED. The lifetime in seconds of the access token. For example, the value \"3600\" denotes that the access token will expire in one hour from the time the response was generated. If omitted, the authorization server SHOULD provide the expiration time via other means or document the default value. **scope** OPTIONAL, if identical to the scope requested by the client, otherwise REQUIRED. The scope of the access token as described by Section 3.3. **state** REQUIRED if the \"state\" parameter was present in the client authorization request. The exact value received from the client. Similar to the authorization code response, but with a full token provided in the URL fragment: .. 
code-block:: http HTTP/1.1 302 Found Location: &state=xyz&token_type=example&expires_in=3600", + "type": "function", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\parameters.py", + "ast_data": "FunctionDef name:parse_implicit_response arg:uri arg:state arguments arg arg Assign Call Assign Call Call If Compare Raise Call If Compare Raise Call If BoolOp Compare Call Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "tck", + "source_code": "@property\ndef tck(self):\n return (self.t, self.c, self.k)", + "docstring": "Equivalent to `` (read-only).", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_bsplines.py", + "ast_data": "FunctionDef name:tck arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "multiplier", + "source_code": "@property\ndef multiplier(self):\n return self._multiplier", + "docstring": "The [batch] scalar , in .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py", + "ast_data": "FunctionDef name:multiplier arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "unproject", + "source_code": "def unproject(self, points: Vector2, depth: Tensor) -> Vector3:\n return self.projection.unproject(self.distortion.undistort(self.params, points), depth)", + "docstring": "Unprojects 2D points from camera plane to 3D. Args: points: Vector2 representing 2D points. depth: Depth of the points. Returns: Vector3 representing the unprojected 3D points. Example: >>> points = Vector2(torch.Tensor([1.0, 1.0])) >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.PINHOLE, torch.Tensor([328., 328., 320., 240.])) >>> cam.unproject(points, torch.Tensor([1.0])) x: tensor([-0.9726]) y: tensor([-0.7287]) z: tensor([1.])", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "FunctionDef name:unproject arg:self arg:points arg:depth arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "allows_duplicate_labels", + "source_code": "@property\ndef allows_duplicate_labels(self) -> bool:\n return self._allows_duplicate_labels", + "docstring": "Whether this object allows duplicate labels. Setting `duplicates` for more. See Also -------- DataFrame.attrs : Set global metadata on this object. DataFrame.set_flags : Set global flags on this object. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2]}, index=[\"a\", \"a\"]) >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Traceback (most recent call last): ... pandas.errors.DuplicateLabelError: Index has duplicates. positions label a [0, 1]", + "type": "method", + "file_path": "pandas\\pandas\\core\\flags.py", + "ast_data": "FunctionDef name:allows_duplicate_labels arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_cleanup_fontproperties_init", + "source_code": "def _cleanup_fontproperties_init(init_method):\n\n @functools.wraps(init_method)\n def wrapper(self, *args, **kwargs):\n if len(args) > 1 or (len(args) == 1 and kwargs):\n _api.warn_deprecated('3.10', message='Passing individual properties to FontProperties() positionally was deprecated in Matplotlib %(since)s and will be removed in %(removal)s. 
Please pass all properties via keyword arguments.')\n if len(args) == 1 and (not kwargs) and (not cbook.is_scalar_or_string(args[0])):\n _api.warn_deprecated('3.10', message='Passing family as positional argument to FontProperties() was deprecated in Matplotlib %(since)s and will be removed in %(removal)s. Please pass family names as keywordargument.')\n return init_method(self, *args, **kwargs)\n return wrapper", + "docstring": "A decorator to limit the call signature to single a positional argument or alternatively only keyword arguments. We still accept but deprecate all other call signatures. When the deprecation expires we can switch the signature to:: __init__(self, pattern=None, /, *, family=None, style=None, ...) plus a runtime check that pattern is not used alongside with the keyword arguments. This results eventually in the two possible call signatures:: FontProperties(pattern) FontProperties(family=..., size=..., ...)", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", + "ast_data": "FunctionDef name:_cleanup_fontproperties_init arg:init_method arguments arg FunctionDef name:wrapper arg:self arguments arg arg arg If BoolOp Compare Call BoolOp Compare Call Call If BoolOp Compare Call Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_BisectingTree", + "source_code": "class _BisectingTree:\n\n def __init__(self, center, indices, score):\n self.center = center\n self.indices = indices\n self.score = score\n self.left = None\n self.right = None\n\n def split(self, labels, centers, scores):\n self.left = _BisectingTree(indices=self.indices[labels == 0], center=centers[0], score=scores[0])\n self.right = _BisectingTree(indices=self.indices[labels == 1], center=centers[1], score=scores[1])\n self.indices = None\n\n def get_cluster_to_bisect(self):\n max_score = None\n for cluster_leaf in self.iter_leaves():\n if max_score is None or cluster_leaf.score > max_score:\n max_score = cluster_leaf.score\n best_cluster_leaf = cluster_leaf\n return best_cluster_leaf\n\n def iter_leaves(self):\n if self.left is None:\n yield self\n else:\n yield from self.left.iter_leaves()\n yield from self.right.iter_leaves()", + "docstring": "Tree structure representing the hierarchical clusters of BisectingKMeans.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py", + "ast_data": "ClassDef name:_BisectingTree FunctionDef name:__init__ arg:self arg:center arg:indices arg:score arguments arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:split arg:self arg:labels arg:centers arg:scores arguments arg arg arg arg Assign Call Compare Assign Call Compare Assign FunctionDef name:get_cluster_to_bisect arg:self arguments arg Assign For Call If BoolOp Compare Compare Assign Assign Return return:yes FunctionDef name:iter_leaves arg:self arguments arg If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "scatter_nd_sub", + "source_code": "def scatter_nd_sub(self, indices, updates, name=None):\n return gen_state_ops.scatter_nd_sub(self._variable, indices, updates, use_locking=True, name=name)", + "docstring": "Applies sparse subtraction to individual values or slices in a Variable. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . 
is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to ref would look like this: [1, -9, 3, -6, -6, 6, 7, -4] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered subtraction has completed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:scatter_nd_sub arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "BenchmarkTensors", + "source_code": "@dataclasses.dataclass\nclass BenchmarkTensors:\n input_tensors: list[torch.Tensor]\n output_tensor: Optional[torch.Tensor]\n\n def unpack(self):\n return (self.input_tensors, self.output_tensor)", + "docstring": "Represents a set of inputs and outputs for autotuning with a template", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", + "ast_data": "ClassDef name:BenchmarkTensors FunctionDef name:unpack arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_kwargs_to_dict", + "source_code": "def _kwargs_to_dict(self, node):\n if node.keywords:\n return gast.Call(gast.Name('dict', ctx=gast.Load(), annotation=None, type_comment=None), args=(), keywords=node.keywords)\n else:\n return parser.parse_expression('None')", + "docstring": "Ties together all keyword and **kwarg arguments in a single dict.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\call_trees.py", + "ast_data": "FunctionDef name:_kwargs_to_dict arg:self arg:node arguments arg arg If Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "__setitem__", + "source_code": "def __setitem__(self, key, value):\n if not self.loaded:\n self.load()\n self._data[key] = value", + "docstring": "Store an object in the session.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg If Call Assign" + }, + { + "library": "pytorch", + "name": "_gen_param_group_key", + "source_code": "def _gen_param_group_key(param_keys: list[str]) -> str:\n return '/'.join(sorted(param_keys))", + "docstring": "Concatenate all param keys as a unique indentifier for one param group.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\optim\\named_optimizer.py", + "ast_data": "FunctionDef name:_gen_param_group_key arg:param_keys arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "restore", + "source_code": "def restore(self, save_path, options=None):\n self._checkpoint_options = copy.copy(options) if options else self._checkpoint_options\n if self._checkpoint_options:\n self._checkpoint_options.experimental_enable_async_checkpoint = False\n self._queue.join()\n status = self.checkpointer().restore(save_path, self._checkpoint_options)\n return status", + "docstring": "Restore the checkpointed variables. Args: save_path: The full name of the checkpoint file to be restored. options: CheckpointOption instance. 
Returns: A load status object, which can be used to make assertions about the status of a checkpoint restoration. See tf.train.Checkpoint.restore() for more details.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py", + "ast_data": "FunctionDef name:restore arg:self arg:save_path arg:options arguments arg arg arg Assign Call If Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "unary_elementwise_apis", + "source_code": "def unary_elementwise_apis():\n return tuple(_UNARY_ELEMENTWISE_APIS)", + "docstring": "Returns a list of APIs that have been registered as unary elementwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", + "ast_data": "FunctionDef name:unary_elementwise_apis arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "from_float", + "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n return _ConvNd.from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)", + "docstring": "Creates a quantized module from a float module or qparams_dict. Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by the user", + "type": "method", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py", + "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "weight_init", + "source_code": "def weight_init(m: nn.Module) -> None:\n if isinstance(m, (nn.Conv2d,)):\n torch.nn.init.xavier_normal_(m.weight, gain=1.0)\n if m.weight.data.shape[1] == torch.Size([1]):\n torch.nn.init.normal_(m.weight, mean=0.0)\n if m.bias is not None:\n torch.nn.init.zeros_(m.bias)\n if isinstance(m, (nn.ConvTranspose2d,)):\n torch.nn.init.xavier_normal_(m.weight, gain=1.0)\n if m.weight.data.shape[1] == torch.Size([1]):\n torch.nn.init.normal_(m.weight, std=0.1)\n if m.bias is not None:\n torch.nn.init.zeros_(m.bias)", + "docstring": "Initialize weights.", + "type": "function", + "file_path": "kornia\\kornia\\filters\\dexined.py", + "ast_data": "FunctionDef name:weight_init arg:m arguments arg If Call Call If Compare Call Call If Compare Call If Call Call If Compare Call Call If Compare Call" + }, + { + "library": "scipy", + "name": "rvs", + "source_code": "def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-07):\n dim, eigs = self._process_parameters(eigs, tol=tol)\n random_state = self._get_random_state(random_state)\n m = ortho_group.rvs(dim, random_state=random_state)\n m = np.dot(np.dot(m, np.diag(eigs)), m.T)\n m = self._to_corr(m)\n if abs(m.diagonal() - 1).max() > diag_tol:\n raise RuntimeError('Failed to generate a valid correlation matrix')\n return m", + "docstring": "Draw random correlation matrices. Parameters ---------- eigs : 1d ndarray Eigenvalues of correlation matrix tol : float, optional Tolerance for input parameter checks diag_tol : float, optional Tolerance for deviation of the diagonal of the resulting matrix. Default: 1e-7 Raises ------ RuntimeError Floating point error prevented generating a valid correlation matrix. 
Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim), each having eigenvalues eigs.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:rvs arg:self arg:eigs arg:random_state arg:tol arg:diag_tol arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Call Assign Call If Compare Call Call Call Raise Call Return return:yes" + }, + { + "library": "numpy", + "name": "X11NotFoundError", + "source_code": "class X11NotFoundError(NotFoundError):\n pass", + "docstring": "X11 libraries not found.", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "ClassDef name:X11NotFoundError" + }, + { + "library": "tensorflow", + "name": "_get_target_dtype", + "source_code": "def _get_target_dtype(values, dtype=None, dtype_hint=None):\n if dtype is not None:\n return dtype\n for value in values:\n if isinstance(value, tensor_lib.Tensor):\n return value.dtype\n for value in values:\n if isinstance(value, np.ndarray):\n return dtypes.as_dtype(value.dtype)\n if dtype_hint is not None:\n return dtype_hint\n return dtypes.int64", + "docstring": "Gets the target dtype of a family of values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:_get_target_dtype arg:values arg:dtype arg:dtype_hint arguments arg arg arg If Compare Return return:yes For If Call Return return:yes For If Call Return return:yes Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "maybe_do_strip", + "source_code": "def maybe_do_strip(node: node_def_pb2.NodeDef) -> None:\n if node.op == 'Assert' or node.op == 'PrintV2':\n node.op = 'NoOp'\n erase_regular_node_attributes(node)\n new_inputs = []\n for inp in node.input:\n if not is_control_input(inp):\n new_inputs.append(as_control_dep(inp))\n else:\n new_inputs.append(inp)\n node.ClearField('input')\n node.input.extend(new_inputs)\n elif node.op == 'CheckNumerics' or node.op == 'Print':\n node.op = 'Identity'\n prune_all_non_t_attributes(node)\n for i in range(1, len(node.input)):\n if not is_control_input(node.input[i]):\n node.input[i] = as_control_dep(node.input[i])", + "docstring": "Strips the graph from Assert and CheckNumerics ops. For Assert ops, this function also rewrites all of the inputs to the nodes that were transformed by making them into control dependencies. It also removes all of the regular node attributes, that is all node attributes that do not start with . For CheckNumerics ops, this function turns the op into an Identity op, which will be pruned later (according to the original implementation in grappler's . Then, since Identity ops only take one input, it leaves the first input as is while transforming the other ones into control dependencies. 
Args: node: The node to potentally strip.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:maybe_do_strip arg:node arguments arg If BoolOp Compare Compare Assign Call Assign For If Call Call Call Call Call Call If BoolOp Compare Compare Assign Call For Call Call If Call Assign Call" + }, + { + "library": "matplotlib", + "name": "_on_size", + "source_code": "def _on_size(self, event):\n self._update_device_pixel_ratio()\n _log.debug('%s - _on_size()', type(self))\n sz = self.GetParent().GetSizer()\n if sz:\n si = sz.GetItem(self)\n if sz and si and (not si.Proportion) and (not si.Flag & wx.EXPAND):\n size = self.GetMinSize()\n else:\n size = self.GetClientSize()\n size.IncTo(self.GetMinSize())\n if getattr(self, '_width', None):\n if size == (self._width, self._height):\n return\n self._width, self._height = size\n self._isDrawn = False\n if self._width <= 1 or self._height <= 1:\n return\n dpival = self.figure.dpi\n if not wx.Platform == '__WXMSW__':\n scale = self.GetDPIScaleFactor()\n dpival /= scale\n winch = self._width / dpival\n hinch = self._height / dpival\n self.figure.set_size_inches(winch, hinch, forward=False)\n self.Refresh(eraseBackground=False)\n ResizeEvent('resize_event', self)._process()\n self.draw_idle()", + "docstring": "Called when wxEventSize is generated. In this application we attempt to resize to fit the window, so it is better to take the performance hit and redraw the whole window.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py", + "ast_data": "FunctionDef name:_on_size arg:self arg:event arguments arg arg Call Call Call Assign Call Call If Assign Call If BoolOp Assign Call Assign Call Call Call If Call If Compare Return return:no Assign Assign If BoolOp Compare Compare Return return:no Assign If Compare Assign Call Assign Assign Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "saveables", + "source_code": "@property\ndef saveables(self):\n return self._saveables", + "docstring": "Returns a list of SaveableObjects generated from the Trackable object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "FunctionDef name:saveables arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "run_restore_ops", + "source_code": "def run_restore_ops(self, session=None):\n if context.executing_eagerly():\n return\n if session is None:\n session = get_session()\n session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)", + "docstring": "Run operations to restore objects in the dependency graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:run_restore_ops arg:self arg:session arguments arg arg If Call Return return:no If Compare Assign Call Call" + }, + { + "library": "tensorflow", + "name": "check_alive", + "source_code": "def check_alive(self, worker_name):\n if self._context_handle:\n return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)\n else:\n raise ValueError('Context is not initialized.')", + "docstring": "Checks whether a remote worker is alive or not. Args: worker_name: a string representing the remote worker. It must be a fully specified name like \"/job:worker/replica:0/task:0\". Returns: a boolean indicating whether the remote worker is alive or not. 
Raises: ValueError: if context is not initialized.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:check_alive arg:self arg:worker_name arguments arg arg If Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "post_unshard", + "source_code": "def post_unshard(self):\n if self._uses_param_mixed_precision and self.uses_sharded_strategy:\n self._free_low_precision_sharded_param()\n self._check_on_compute_device(self.flat_param)", + "docstring": "Run the post-unshard logic. This includes freeing the low precision shard if needed.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:post_unshard arg:self arguments arg If BoolOp Call Call" + }, + { + "library": "tensorflow", + "name": "__enter__", + "source_code": "def __enter__(self):\n if self._device_scope is not None:\n raise AssertionError('Re-entered a ParallelDevice scope without first exiting it.')\n self._assert_eager()\n self._device_scope = ops.device(self._name)\n self._device_scope.__enter__()\n return self", + "docstring": "Runs ops in parallel, makes variables which save independent buffers.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py", + "ast_data": "FunctionDef name:__enter__ arg:self arguments arg If Compare Raise Call Call Assign Call Call Return return:yes" + }, + { + "library": "cryptography", + "name": "put_mpint", + "source_code": "def put_mpint(self, val: int) -> None:\n self.put_sshstr(_to_mpint(val))", + "docstring": "Big-endian bigint prefixed with u32 length", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py", + "ast_data": "FunctionDef name:put_mpint arg:self arg:val arguments arg arg Call Call" + }, + { + "library": "pandas", + "name": "column_data_offsets", + "source_code": "def column_data_offsets(self) -> np.ndarray:\n return np.asarray(self._column_data_offsets, dtype=np.int64)", + "docstring": "Return a numpy int64 array of the column offsets", + "type": "method", + "file_path": "pandas\\pandas\\io\\sas\\sas7bdat.py", + "ast_data": "FunctionDef name:column_data_offsets arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "legdiv", + "source_code": "def legdiv(c1, c2):\n return pu._div(legmul, c1, c2)", + "docstring": "Divide one Legendre series by another. Returns the quotient-with-remainder of two Legendre series / . The arguments are sequences of coefficients from lowest order \"term\" to highest, e.g., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Legendre series coefficients ordered from low to high. Returns ------- quo, rem : ndarrays Of Legendre series coefficients representing the quotient and remainder. See Also -------- legadd, legsub, legmulx, legmul, legpow Notes ----- In general, the (polynomial) division of one Legendre series by another results in quotient and remainder terms that are not in the Legendre polynomial basis set. Thus, to express these results as a Legendre series, it is necessary to \"reproject\" the results onto the Legendre basis set, which may produce \"unintuitive\" (but correct) results; see Examples section below. 
Examples -------- >>> from numpy.polynomial import legendre as L >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> L.legdiv(c1,c2) # quotient \"intuitive,\" remainder not (array([3.]), array([-8., -4.])) >>> c2 = (0,1,2,3) >>> L.legdiv(c2,c1) # neither \"intuitive\" (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:legdiv arg:c1 arg:c2 arguments arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "get_grid_dict", + "source_code": "def get_grid_dict(patch_size: int=32) -> Dict[str, Tensor]:\n kgrid = create_meshgrid(height=patch_size, width=patch_size, normalized_coordinates=True)\n x = kgrid[0, :, :, 0]\n y = kgrid[0, :, :, 1]\n rho, phi = cart2pol(x, y)\n grid_dict = {'x': x, 'y': y, 'rho': rho, 'phi': phi}\n return grid_dict", + "docstring": "Get cartesian and polar parametrizations of grid.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\mkd.py", + "ast_data": "FunctionDef name:get_grid_dict arg:patch_size arguments arg Assign Call Assign Assign Assign Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "kolmogn", + "source_code": "def kolmogn(n, x, cdf=True):\n it = np.nditer([n, x, cdf, None], flags=['zerosize_ok'], op_dtypes=[None, np.float64, np.bool_, np.float64])\n for _n, _x, _cdf, z in it:\n if np.isnan(_n):\n z[...] = _n\n continue\n if int(_n) != _n:\n raise ValueError(f'n is not integral: {_n}')\n z[...] = _kolmogn(int(_n), _x, cdf=_cdf)\n result = it.operands[-1]\n return result", + "docstring": "Computes the CDF for the two-sided Kolmogorov-Smirnov distribution. The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x), for a sample of size n drawn from a distribution with CDF F(t), where :math:, and :math: is the Empirical Cumulative Distribution Function of the sample. Parameters ---------- n : integer, array_like the number of samples x : float, array_like The K-S statistic, float between 0 and 1 cdf : bool, optional whether to compute the CDF(default=true) or the SF. Returns ------- cdf : ndarray CDF (or SF it cdf is False) at the specified locations. The return value has shape the result of numpy broadcasting n and x.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_ksstats.py", + "ast_data": "FunctionDef name:kolmogn arg:n arg:x arg:cdf arguments arg arg arg Assign Call For If Call Assign If Compare Call Raise Call Assign Call Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "get_slice_bound", + "source_code": "def get_slice_bound(self, label: Hashable | Sequence[Hashable], side: Literal['left', 'right']) -> int:\n if not isinstance(label, tuple):\n label = (label,)\n return self._partial_tup_index(label, side=side)", + "docstring": "For an ordered MultiIndex, compute slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if `side=='right') position of given label. Parameters ---------- label : object or tuple of objects side : {'left', 'right'} Returns ------- int Index of label. Notes ----- This method only works if level 0 index of the MultiIndex is lexsorted. 
Examples -------- >>> mi = pd.MultiIndex.from_arrays([list(\"abbc\"), list(\"gefd\")]) Get the locations from the leftmost 'b' in the first level until the end of the multiindex: >>> mi.get_slice_bound(\"b\", side=\"left\") 1 Like above, but if you get the locations from the rightmost 'b' in the first level and 'f' in the second level: >>> mi.get_slice_bound((\"b\", \"f\"), side=\"right\") 3 See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:get_slice_bound arg:self arg:label arg:side arguments arg arg arg If Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "copy_assets_to_destination_dir", + "source_code": "def copy_assets_to_destination_dir(asset_filename_map, destination_dir, saved_files=None):\n if saved_files is None:\n saved_files = set()\n assets_destination_dir = path_helpers.get_or_create_assets_dir(destination_dir)\n for asset_basename, asset_source_filepath in asset_filename_map.items():\n asset_destination_filepath = file_io.join(compat.as_bytes(assets_destination_dir), compat.as_bytes(asset_basename))\n if file_io.file_exists(asset_source_filepath) and asset_source_filepath != asset_destination_filepath and (asset_destination_filepath not in saved_files):\n file_io.copy(asset_source_filepath, asset_destination_filepath, overwrite=True)\n saved_files.add(asset_destination_filepath)\n tf_logging.info('Assets written to: %s', compat.as_text(assets_destination_dir))", + "docstring": "Copy all assets from source path to destination path. Args: asset_filename_map: a dict of filenames used for saving the asset in the SavedModel to full paths from which the filenames were derived. destination_dir: the destination directory that assets are stored in. saved_files: a set of destination filepaths that have already been copied and will be skipped", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py", + "ast_data": "FunctionDef name:copy_assets_to_destination_dir arg:asset_filename_map arg:destination_dir arg:saved_files arguments arg arg arg If Compare Assign Call Assign Call For Call Assign Call Call Call If BoolOp Call Compare Compare Call Call Call Call" + }, + { + "library": "django", + "name": "_watch_glob", + "source_code": "def _watch_glob(self, directory, patterns):\n prefix = 'glob'\n if not directory.exists():\n if not directory.parent.exists():\n logger.warning('Unable to watch directory %s as neither it or its parent exist.', directory)\n return\n prefix = 'glob-parent-%s' % directory.name\n patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]\n directory = directory.parent\n expression = ['anyof']\n for pattern in patterns:\n expression.append(['match', pattern, 'wholename'])\n self._subscribe(directory, '%s:%s' % (prefix, directory), expression)", + "docstring": "Watch a directory with a specific glob. If the directory doesn't yet exist, attempt to watch the parent directory and amend the patterns to include this. It's important this method isn't called more than one per directory when updating all subscriptions. 
Subsequent calls will overwrite the named subscription, so it must include all possible glob expressions.", + "type": "method", + "file_path": "django\\django\\utils\\autoreload.py", + "ast_data": "FunctionDef name:_watch_glob arg:self arg:directory arg:patterns arguments arg arg arg Assign If Call If Call Call Return return:no Assign Assign Assign Assign For Call Call" + }, + { + "library": "django", + "name": "__reduce__", + "source_code": "def __reduce__(self):\n return (getattr, (self.field.model, self.field.name))", + "docstring": "Pickling should return the instance attached by self.field on the model, not a new copy of that descriptor. Use getattr() to retrieve the instance directly from the model.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py", + "ast_data": "FunctionDef name:__reduce__ arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n check_is_fitted(self, attributes='n_features_in_')\n return _check_feature_names_in(self, input_features)", + "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Same as input features.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n if len(shape) != 2:\n raise ValueError('Identity matrix initializer can only be used for 2D matrices.')\n initializer = linalg_ops.eye(*shape, dtype=dtype)\n return self.gain * initializer", + "docstring": "Returns a tensor object initialized to a 2D identity matrix. Args: shape: Shape of the tensor. It should have exactly rank 2. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, is used, which default to unless you configured it otherwise (via ) **kwargs: Additional keyword arguments.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Call Raise Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "ActivityRegularization", + "source_code": "class ActivityRegularization(Layer):\n\n def __init__(self, l1=0.0, l2=0.0, **kwargs):\n super(ActivityRegularization, self).__init__(activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'l1': self.l1, 'l2': self.l2}\n base_config = super(ActivityRegularization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", + "docstring": "Layer that applies an update to the cost function based input activity. 
Args: l1: L1 regularization factor (positive float). l2: L2 regularization factor (positive float). Input shape: Arbitrary. Use the keyword argument (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py", + "ast_data": "ClassDef name:ActivityRegularization FunctionDef name:__init__ arg:self arg:l1 arg:l2 arguments arg arg arg arg Call Call Call Assign Assign Assign FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Return return:yes FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call" + }, + { + "library": "scipy", + "name": "fprati", + "source_code": "def fprati(p1, f1, p2, f2, p3, f3):\n h1 = f1 * (f2 - f3)\n h2 = f2 * (f3 - f1)\n h3 = f3 * (f1 - f2)\n if p3 == np.inf:\n return -(p2 * h1 + p1 * h2) / h3\n return -(p1 * p2 * h3 + p2 * p3 * h1 + p1 * p3 * h2) / (p1 * h1 + p2 * h2 + p3 * h3)", + "docstring": "The root of r(p) = (u*p + v) / (p + w) given three points and values, (p1, f2), (p2, f2) and (p3, f3). The FITPACK analog adjusts the bounds, and we do not NB: FITPACK uses p < 0 to encode p=infinity. We just use the infinity itself. Since the bracket is ``).", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_fitpack_repro.py", + "ast_data": "FunctionDef name:fprati arg:p1 arg:f1 arg:p2 arg:f2 arg:p3 arg:f3 arguments arg arg arg arg arg arg Assign Assign Assign If Compare Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "chebval3d", + "source_code": "def chebval3d(x, y, z, c):\n return pu._valnd(chebval, c, x, y, z)", + "docstring": "Evaluate a 3-D Chebyshev series at points (x, y, z). This function returns the values: .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) The parameters , , and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either , , and or their elements must support multiplication and addition both with themselves and with the elements of . If has fewer than 3 dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape. Parameters ---------- x, y, z : array_like, compatible object The three dimensional series is evaluated at the points `xyzxyzcxyz`. See Also -------- chebval, chebval2d, chebgrid2d, chebgrid3d", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", + "ast_data": "FunctionDef name:chebval3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "var", + "source_code": "def var(self, df, scale):\n dim, df, scale = self._process_parameters(df, scale)\n out = self._var(dim, df, scale)\n return _squeeze_output(out) if out is not None else out", + "docstring": "Variance of the inverse Wishart distribution. Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus three. 
Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:var arg:self arg:df arg:scale arguments arg arg arg Assign Call Assign Call Return return:yes Compare Call" + }, + { + "library": "pandas", + "name": "name", + "source_code": "@property\ndef name(self) -> str:\n return self._dtype.name", + "docstring": "A bit-width name for this data-type.", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_gen", + "source_code": "def _gen(data):\n index_array = np.arange(num_samples)\n for _ in range(epochs):\n if shuffle:\n np.random.shuffle(index_array)\n batches = generic_utils.make_batches(num_samples, batch_size)\n for batch_start, batch_end in batches:\n batch_ids = index_array[batch_start:batch_end]\n flat_batch_data = training_utils.slice_arrays(nest.flatten(data), batch_ids, contiguous=not shuffle)\n yield nest.pack_sequence_as(data, flat_batch_data)", + "docstring": "Makes a generator out of a structure of NumPy/EagerTensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_generator_v1.py", + "ast_data": "FunctionDef name:_gen arg:data arguments arg Assign Call For Call If Call Assign Call For Assign Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "_cache_key", + "source_code": "def _cache_key(self, state: _CacheKeyState, func: OpOverload, args: Sequence[object], kwargs: Mapping[str, object]) -> _DispatchCacheKey:\n key_values = [func, torch.get_default_dtype(), torch._C._get_default_device(), torch.is_inference_mode_enabled(), self.shape_env.settings if self.shape_env else None]\n if state.known_symbols:\n key_values.append(self.epoch)\n id_hashed_objects: list[object] = []\n if args:\n self._prep_args_for_hash(key_values, args, state, id_hashed_objects)\n if kwargs:\n self._prep_args_for_hash(key_values, kwargs, state, id_hashed_objects)\n key = _DispatchCacheKey(tuple(key_values))\n for id_hashed_obj in id_hashed_objects:\n weakref.finalize(id_hashed_obj, functools.partial(evict_fake_tensor_cache_key, key=key))\n id_hashed_objects.clear()\n return key", + "docstring": "Create a cache key given the dispatch args. 
Raises _BypassDispatchCache for any situation that precludes caching.", + "type": "method", + "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py", + "ast_data": "FunctionDef name:_cache_key arg:self arg:state arg:func arg:args arg:kwargs arguments arg arg arg arg arg Assign Call Call Call If Call If Call If Call Assign Call Call For Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_use_sharded_flat_param", + "source_code": "def _use_sharded_flat_param(self) -> None:\n flat_param = self.flat_param\n if self._use_orig_params:\n in_forward = self._training_state == HandleTrainingState.FORWARD\n skip_use_sharded_views = torch.is_grad_enabled() and in_forward and (self._sharding_strategy in NO_RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES)\n if skip_use_sharded_views:\n unsharded_flat_param = flat_param.data\n if self._offload_params:\n device = flat_param._local_shard.device\n _p_assert(device == torch.device('cpu'), f'Expects the local shard to be on CPU but got {device}')\n flat_param.data = flat_param._local_shard\n if self._use_orig_params:\n if skip_use_sharded_views:\n self._unsharded_flat_param_for_skipped_views = unsharded_flat_param\n else:\n self._use_sharded_views()\n if in_forward and (not self._skipped_use_sharded_views):\n accumulated_grad_in_no_sync = flat_param.grad is not None and self.uses_sharded_strategy and (flat_param.grad.shape == flat_param._unpadded_unsharded_size)\n if accumulated_grad_in_no_sync:\n self._use_unsharded_grad_views()\n else:\n self._use_sharded_grad_views()", + "docstring": "Switches to using the sharded flat parameter.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:_use_sharded_flat_param arg:self arguments arg Assign If Assign Compare Assign BoolOp Call Compare If Assign If Assign Call Compare Call Assign If If Assign Call If BoolOp Assign BoolOp Compare Compare If Call Call" + }, + { + "library": "kornia", + "name": "_initialise_cluster_centers", + "source_code": "def _initialise_cluster_centers(self, X: Tensor, num_clusters: int) -> Tensor:\n num_samples: int = len(X)\n perm = torch.randperm(num_samples, device=X.device)\n idx = perm[:num_clusters]\n initial_state = X[idx]\n return initial_state", + "docstring": "Chooses num_cluster points from X as the initial cluster centers. 
Args: X: 2D input tensor to be clustered num_clusters: number of desired cluster centers Returns: 2D Tensor with num_cluster rows", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\kmeans.py", + "ast_data": "FunctionDef name:_initialise_cluster_centers arg:self arg:X arg:num_clusters arguments arg arg arg Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_find_producer_matmul", + "source_code": "def _find_producer_matmul(node: torch.fx.Node) -> Optional[_Matmul]:\n if node.target == aten.mm.default:\n return _Matmul.from_match(match=[node])\n elif node.target == aten._scaled_mm.default:\n return _ScaledMatmul.from_match(match=[node])\n elif node.target == aten.reshape.default:\n reshape_node_1 = node\n mm_node = reshape_node_1.args[0]\n assert isinstance(mm_node, torch.fx.Node)\n if mm_node.target not in (aten.mm.default, aten._scaled_mm.default):\n return None\n reshape_node_0 = mm_node.args[0]\n assert isinstance(reshape_node_0, torch.fx.Node)\n if reshape_node_0.target != aten.reshape.default:\n return None\n if mm_node.target == aten.mm.default:\n return _Matmul.from_match(match=[reshape_node_0, mm_node, reshape_node_1])\n elif mm_node.target == aten._scaled_mm.default:\n return _ScaledMatmul.from_match(match=[reshape_node_0, mm_node, reshape_node_1])\n return None", + "docstring": "Returns producer matmul node if found, otherwise returns None.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py", + "ast_data": "FunctionDef name:_find_producer_matmul arg:node arguments arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Assign Assign Call If Compare Return return:no Assign Call If Compare Return return:no If Compare Return return:yes Call If Compare Return return:yes Call Return return:no" + }, + { + "library": "django", + "name": "get_max_num", + "source_code": "def get_max_num(self, request, obj=None, **kwargs):\n return self.max_num", + "docstring": "Hook for customizing the max number of extra inline forms.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_max_num arg:self arg:request arg:obj arguments arg arg arg arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "staged_predict_proba", + "source_code": "def staged_predict_proba(self, X):\n for raw_predictions in self._staged_raw_predict(X):\n yield self._loss.predict_proba(raw_predictions)", + "docstring": "Predict class probabilities at each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. 
Yields ------ y : generator of ndarray of shape (n_samples,) The predicted class probabilities of the input samples, for each iteration.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:staged_predict_proba arg:self arg:X arguments arg arg For Call Call" + }, + { + "library": "tensorflow", + "name": "initializer", + "source_code": "@property\ndef initializer(self):\n return self._initializer_op", + "docstring": "The op responsible for initializing this variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:initializer arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_unflatten_sparse_tensors", + "source_code": "def _unflatten_sparse_tensors(flat, tensors):\n flat_indices, flat_values = flat\n indices = torch._C._nn.unflatten_dense_tensors(flat_indices, [torch.Tensor._indices(t) for t in tensors])\n values = torch._C._nn.unflatten_dense_tensors(flat_values, [torch.Tensor._values(t) for t in tensors])\n outputs = []\n for t, i, v in zip(tensors, indices, values):\n outputs.append(t.new(i, v, t.size()))\n return tuple(outputs)", + "docstring": "View flat buffer (containing indices and values) using the sizes of tensors. Assume that tensors are of same sparse type, and that flat is given by _flatten_sparse_tensors. Args: flat (tuple(Tensor, Tensor)): flattened indices and values of sparse tensors to unflatten. tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to unflatten flat. Returns: Unflattened sparse tensors with sizes same as tensors and values from flat.", + "type": "function", + "file_path": "pytorch\\torch\\_utils.py", + "ast_data": "FunctionDef name:_unflatten_sparse_tensors arg:flat arg:tensors arguments arg arg Assign Assign Call Call Assign Call Call Assign For Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, w):\n return w", + "docstring": "Applies the constraint to the input weight variable. By default, the inputs weight variable is not modified. Users should override this method to implement their own projection function. Args: w: Input weight variable. 
Returns: Projected variable (by default, returns unmodified inputs).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:w arguments arg arg Return return:yes" + }, + { + "library": "numpy", + "name": "general_source_directories_files", + "source_code": "def general_source_directories_files(top_path):\n pruned_directories = ['CVS', '.svn', 'build']\n prune_file_pat = re.compile('(?:[~#]|\\\\.py[co]|\\\\.o)$')\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n pruned = [d for d in dirnames if d not in pruned_directories]\n dirnames[:] = pruned\n for d in dirnames:\n dpath = os.path.join(dirpath, d)\n rpath = rel_path(dpath, top_path)\n files = []\n for f in os.listdir(dpath):\n fn = os.path.join(dpath, f)\n if os.path.isfile(fn) and (not prune_file_pat.search(fn)):\n files.append(fn)\n yield (rpath, files)\n dpath = top_path\n rpath = rel_path(dpath, top_path)\n filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) if not prune_file_pat.search(f)]\n files = [f for f in filenames if os.path.isfile(f)]\n yield (rpath, files)", + "docstring": "Return a directory name relative to top_path and files contained.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:general_source_directories_files arg:top_path arguments arg Assign Assign Call For Call Assign Compare Assign For Assign Call Assign Call Assign For Call Assign Call If BoolOp Call Call Call Assign Assign Call Assign Call Call Call Assign Call" + }, + { + "library": "tensorflow", + "name": "initial_value", + "source_code": "@property\ndef initial_value(self):\n raise NotImplementedError", + "docstring": "Returns the Tensor used as the initial value for the variable. Note that this is different from which runs the op that initializes the variable before returning its value. This method returns the tensor that is used by the op that initializes the variable. Returns: A .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:initial_value arg:self arguments arg Raise" + }, + { + "library": "matplotlib", + "name": "get_geometry", + "source_code": "def get_geometry(self):\n return (self._nrows, self._ncols)", + "docstring": "Return a tuple containing the number of rows and columns in the grid.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", + "ast_data": "FunctionDef name:get_geometry arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_initializer", + "source_code": "def set_initializer(self, initializer):\n self._initializer = initializer", + "docstring": "Set initializer for this scope.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", + "ast_data": "FunctionDef name:set_initializer arg:self arg:initializer arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "delete_recursively_v2", + "source_code": "@tf_export('io.gfile.rmtree')\ndef delete_recursively_v2(path):\n _pywrap_file_io.DeleteRecursively(compat.path_to_bytes(path))", + "docstring": "Deletes everything under path recursively. 
Args: path: string, a path Raises: errors.OpError: If the operation fails.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:delete_recursively_v2 arg:path arguments arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "all_max", + "source_code": "def all_max(tensors):\n return _apply_all_reduce('max', tensors)", + "docstring": "Returns a list of tensors with the all-reduce max across . The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to reduce; must be assigned to GPU devices. Returns: List of tensors, each with the maximum of the input tensors, where tensor i has the same device as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py", + "ast_data": "FunctionDef name:all_max arg:tensors arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, ax, *args, **kwargs):\n super().__init__(ax, *args, **kwargs)", + "docstring": "Draw triangular grid contour lines or filled regions, depending on whether keyword arg *filled* is False (default) or True. The first argument of the initializer must be an object. The remaining arguments and keyword arguments are described in the docstring of .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tricontour.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:ax arguments arg arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "run", + "source_code": "def run(fn=None):\n if fn is not None:\n fn = innermost_fn(fn)\n assert callable(fn)\n return RunOnlyContext()(fn)\n return RunOnlyContext()", + "docstring": "Don't do any dynamic compiles, just use prior optimizations", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\decorators.py", + "ast_data": "FunctionDef name:run arg:fn arguments arg If Compare Assign Call Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "gaussian_blur2d", + "source_code": "def gaussian_blur2d(input: Tensor, kernel_size: tuple[int, int] | int, sigma: tuple[float, float] | Tensor, border_type: str='reflect', separable: bool=True) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(input)\n if isinstance(sigma, tuple):\n sigma = tensor([sigma], device=input.device, dtype=input.dtype)\n else:\n KORNIA_CHECK_IS_TENSOR(sigma)\n sigma = sigma.to(device=input.device, dtype=input.dtype)\n if separable:\n ky, kx = _unpack_2d_ks(kernel_size)\n bs = sigma.shape[0]\n kernel_x = get_gaussian_kernel1d(kx, sigma[:, 1].view(bs, 1))\n kernel_y = get_gaussian_kernel1d(ky, sigma[:, 0].view(bs, 1))\n out = filter2d_separable(input, kernel_x, kernel_y, border_type)\n else:\n kernel = get_gaussian_kernel2d(kernel_size, sigma)\n out = filter2d(input, kernel, border_type)\n return out", + "docstring": "Create an operator that blurs a tensor using a Gaussian filter. .. image:: _static/img/gaussian_blur2d.png The operator smooths the given tensor with a gaussian kernel by convolving it to each channel. It supports batched operation. Arguments: input: the input tensor with shape :math:. kernel_size: the size of the kernel. sigma: the standard deviation of the kernel. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)here `__. 
Examples: >>> input = torch.rand(2, 4, 5, 5) >>> output = gaussian_blur2d(input, (3, 3), (1.5, 1.5)) >>> output.shape torch.Size([2, 4, 5, 5]) >>> output = gaussian_blur2d(input, (3, 3), torch.tensor([[1.5, 1.5]])) >>> output.shape torch.Size([2, 4, 5, 5])", + "type": "function", + "file_path": "kornia\\kornia\\filters\\gaussian.py", + "ast_data": "FunctionDef name:gaussian_blur2d arg:input arg:kernel_size arg:sigma arg:border_type arg:separable arguments arg arg arg arg arg Call If Call Assign Call Call Assign Call If Assign Call Assign Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_concrete_function", + "source_code": "def get_concrete_function(self, *args, **kwargs):\n return self._get_func().get_concrete_function(*args, **kwargs)", + "docstring": "Returns a concrete function of the decorated function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\authoring\\authoring.py", + "ast_data": "FunctionDef name:get_concrete_function arg:self arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "inplace_csr_row_scale", + "source_code": "def inplace_csr_row_scale(X, scale):\n assert scale.shape[0] == X.shape[0]\n X.data *= np.repeat(scale, np.diff(X.indptr))", + "docstring": "Inplace row scaling of a CSR matrix. Scale each sample of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix to be scaled. It should be of CSR format. scale : ndarray of float of shape (n_samples,) Array of precomputed sample-wise values to use for scaling.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py", + "ast_data": "FunctionDef name:inplace_csr_row_scale arg:X arg:scale arguments arg arg Compare Call Call" + }, + { + "library": "scikit-learn", + "name": "TreeNode", + "source_code": "class TreeNode:\n\n def __init__(self, *, depth, sample_indices, partition_start, partition_stop, sum_gradients, sum_hessians, value=None):\n self.depth = depth\n self.sample_indices = sample_indices\n self.n_samples = sample_indices.shape[0]\n self.sum_gradients = sum_gradients\n self.sum_hessians = sum_hessians\n self.value = value\n self.is_leaf = False\n self.allowed_features = None\n self.interaction_cst_indices = None\n self.set_children_bounds(float('-inf'), float('+inf'))\n self.split_info = None\n self.left_child = None\n self.right_child = None\n self.histograms = None\n self.partition_start = partition_start\n self.partition_stop = partition_stop\n\n def set_children_bounds(self, lower, upper):\n self.children_lower_bound = lower\n self.children_upper_bound = upper\n\n def __lt__(self, other_node):\n return self.split_info.gain > other_node.split_info.gain", + "docstring": "Tree Node class used in TreeGrower. This isn't used for prediction purposes, only for training (see TreePredictor). Parameters ---------- depth : int The depth of the node, i.e. its distance from the root. sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32 The indices of the samples at the node. partition_start : int start position of the node's sample_indices in splitter.partition. partition_stop : int stop position of the node's sample_indices in splitter.partition. sum_gradients : float The sum of the gradients of the samples at the node. 
sum_hessians : float The sum of the hessians of the samples at the node. Attributes ---------- depth : int The depth of the node, i.e. its distance from the root. sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32 The indices of the samples at the node. sum_gradients : float The sum of the gradients of the samples at the node. sum_hessians : float The sum of the hessians of the samples at the node. split_info : SplitInfo or None The result of the split evaluation. is_leaf : bool True if node is a leaf left_child : TreeNode or None The left child of the node. None for leaves. right_child : TreeNode or None The right child of the node. None for leaves. value : float or None The value of the leaf, as computed in finalize_leaf(). None for non-leaf nodes. partition_start : int start position of the node's sample_indices in splitter.partition. partition_stop : int stop position of the node's sample_indices in splitter.partition. allowed_features : None or ndarray, dtype=int Indices of features allowed to split for children. interaction_cst_indices : None or list of ints Indices of the interaction sets that have to be applied on splits of child nodes. The fewer sets the stronger the constraint as fewer sets contain fewer features. children_lower_bound : float children_upper_bound : float", + "type": "class", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py", + "ast_data": "ClassDef name:TreeNode FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Call Call Assign Assign Assign Assign Assign Assign FunctionDef name:set_children_bounds arg:self arg:lower arg:upper arguments arg arg arg Assign Assign FunctionDef name:__lt__ arg:self arg:other_node arguments arg arg Return return:yes Compare" + }, + { + "library": "scipy", + "name": "insert", + "source_code": "def insert(x, tck, m=1, per=0):\n if isinstance(tck, BSpline):\n t, c, k = tck.tck\n sh = tuple(range(c.ndim))\n c = c.transpose(sh[1:] + (0,))\n t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)\n c_ = np.asarray(c_)\n c_ = c_.transpose((sh[-1],) + sh[:-1])\n return BSpline(t_, c_, k_)\n else:\n return _impl.insert(x, tck, m, per)", + "docstring": "Insert knots into a B-spline. .. 
legacy:: function Specifically, we recommend constructing a object and using its `mxtckBSpline`t(k+1) >> from scipy.interpolate import splrep, insert >>> import numpy as np >>> x = np.linspace(0, 10, 5) >>> y = np.sin(x) >>> tck = splrep(x, y) >>> tck[0] array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.]) A knot is inserted: >>> tck_inserted = insert(3, tck) >>> tck_inserted[0] array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.]) Some knots are inserted: >>> tck_inserted2 = insert(8, tck, m=3) >>> tck_inserted2[0] array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_fitpack_py.py", + "ast_data": "FunctionDef name:insert arg:x arg:tck arg:m arg:per arguments arg arg arg arg If Call Assign Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "SymLogNorm", + "source_code": "@make_norm_from_scale(scale.SymmetricalLogScale, init=lambda linthresh, linscale=1.0, vmin=None, vmax=None, clip=False, *, base=10: None)\nclass SymLogNorm(Normalize):\n\n @property\n def linthresh(self):\n return self._scale.linthresh\n\n @linthresh.setter\n def linthresh(self, value):\n self._scale.linthresh = value", + "docstring": "The symmetrical logarithmic scale is logarithmic in both the positive and negative directions from the origin. Since the values close to zero tend toward infinity, there is a need to have a range around zero that is linear. The parameter *linthresh* allows the user to specify the size of this range (-*linthresh*, *linthresh*). Parameters ---------- linthresh : float The range within which the plot is linear (to avoid having the plot go to infinity around zero). linscale : float, default: 1 This allows the linear range (-*linthresh* to *linthresh*) to be stretched relative to the logarithmic range. Its value is the number of decades to use for each half of the linear range. For example, when *linscale* == 1.0 (the default), the space used for the positive and negative halves of the linear range will be equal to one decade in the logarithmic range. base : float, default: 10", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "ClassDef name:SymLogNorm FunctionDef name:linthresh arg:self arguments arg Return return:yes FunctionDef name:linthresh arg:self arg:value arguments arg arg Assign Call arguments arg arg arg arg arg arg" + }, + { + "library": "pandas", + "name": "reset_cache", + "source_code": "@classmethod\ndef reset_cache(cls) -> None:\n cls._cache_dtypes = {}", + "docstring": "clear the cache", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:reset_cache arg:cls arguments arg Assign" + }, + { + "library": "scipy", + "name": "mmread", + "source_code": "def mmread(source, *, spmatrix=True):\n cursor, stream_to_close = _get_read_cursor(source)\n if cursor.header.format == 'array':\n mat = _read_body_array(cursor)\n if stream_to_close:\n stream_to_close.close()\n return mat\n else:\n triplet, shape = _read_body_coo(cursor, generalize_symmetry=True)\n if stream_to_close:\n stream_to_close.close()\n if spmatrix:\n return coo_matrix(triplet, shape=shape)\n return coo_array(triplet, shape=shape)", + "docstring": "Reads the contents of a Matrix Market file-like 'source' into a matrix. Parameters ---------- source : str or file-like Matrix Market filename (extensions .mtx, .mtz.gz) or open file-like object. 
spmatrix : bool, optional (default: True) If `threadpoolctl `_ to override: >>> import threadpoolctl >>> >>> with threadpoolctl.threadpool_limits(limits=2): ... m = mmread(StringIO(text), spmatrix=False)", + "type": "function", + "file_path": "scipy\\scipy\\io\\_fast_matrix_market\\__init__.py", + "ast_data": "FunctionDef name:mmread arg:source arguments arg arg Assign Call If Compare Assign Call If Call Return return:yes Assign Call If Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n check_params = dict(accept_sparse=False, ensure_2d=False)\n X = check_array(X, input_name='X', dtype=[np.float64, np.float32], **check_params)\n y = check_array(y, input_name='y', dtype=X.dtype, **check_params)\n check_consistent_length(X, y, sample_weight)\n X, y = self._build_y(X, y, sample_weight)\n self.X_thresholds_, self.y_thresholds_ = (X, y)\n self._build_f(X, y)\n return self", + "docstring": "Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples,) or (n_samples, 1) Training data. .. versionchanged:: 0.24 Also accepts 2d array with 1 feature. y : array-like of shape (n_samples,) Training target. sample_weight : array-like of shape (n_samples,), default=None Weights. If set to None, all weights will be set to 1 (equal weights). Returns ------- self : object Returns an instance of self. Notes ----- X is stored for future use, as :meth: needs X to interpolate new input data.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\isotonic.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_set_order", + "source_code": "def _set_order(X, y, order='C'):\n if order not in [None, 'C', 'F']:\n raise ValueError(\"Unknown value for order. Got {} instead of None, 'C' or 'F'.\".format(order))\n sparse_X = sparse.issparse(X)\n sparse_y = sparse.issparse(y)\n if order is not None:\n sparse_format = 'csc' if order == 'F' else 'csr'\n if sparse_X:\n X = X.asformat(sparse_format, copy=False)\n else:\n X = np.asarray(X, order=order)\n if sparse_y:\n y = y.asformat(sparse_format)\n else:\n y = np.asarray(y, order=order)\n return (X, y)", + "docstring": "Change the order of X and y if necessary. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. order : {None, 'C', 'F'} If 'C', dense arrays are returned as C-ordered, sparse matrices in csr format. If 'F', dense arrays are return as F-ordered, sparse matrices in csc format. Returns ------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data with guaranteed order. 
y : ndarray of shape (n_samples,) Target values with guaranteed order.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py", + "ast_data": "FunctionDef name:_set_order arg:X arg:y arg:order arguments arg arg arg If Compare Raise Call Call Assign Call Assign Call If Compare Assign Compare If Assign Call Assign Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "on_run_end", + "source_code": "@abc.abstractmethod\ndef on_run_end(self, request):\n pass", + "docstring": "Callback invoked on run() calls to the debug-wrapper session. This is a blocking callback. The invocation happens right before the wrapper exits its run() call. Args: request: () callback request object carrying information such as the actual action performed by the session wrapper for the run() call. Returns: An instance of .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "FunctionDef name:on_run_end arg:self arg:request arguments arg arg" + }, + { + "library": "pandas", + "name": "idelete", + "source_code": "def idelete(self, indexer) -> BlockManager:\n is_deleted = np.zeros(self.shape[0], dtype=np.bool_)\n is_deleted[indexer] = True\n taker = (~is_deleted).nonzero()[0]\n nbs = self._slice_take_blocks_ax0(taker, only_slice=True, ref_inplace_op=True)\n new_columns = self.items[~is_deleted]\n axes = [new_columns, self.axes[1]]\n return type(self)(tuple(nbs), axes, verify_integrity=False)", + "docstring": "Delete selected locations, returning a new BlockManager.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:idelete arg:self arg:indexer arguments arg arg Assign Call Assign Assign Call Assign Call Assign Assign Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "EagerGraphCombination", + "source_code": "class EagerGraphCombination(test_combinations.TestCombination):\n\n def context_managers(self, kwargs):\n mode = kwargs.pop('mode', None)\n if mode is None:\n return []\n elif mode == 'eager':\n return [context.eager_mode()]\n elif mode == 'graph':\n return [ops.Graph().as_default(), context.graph_mode()]\n else:\n raise ValueError(f\"Argument 'mode' must be either 'eager' or 'graph'. Received: {mode}.\")\n\n def parameter_modifiers(self):\n return [test_combinations.OptionalParameter('mode')]", + "docstring": "Run the test in Graph or Eager mode. The optional parameter controls the test's execution mode. 
Its accepted values are \"graph\" or \"eager\" literals.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\combinations.py", + "ast_data": "ClassDef name:EagerGraphCombination FunctionDef name:context_managers arg:self arg:kwargs arguments arg arg Assign Call If Compare Return return:no If Compare Return return:yes Call If Compare Return return:yes Call Call Call Raise Call FunctionDef name:parameter_modifiers arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "collect_callgrind", + "source_code": "def collect_callgrind(self, task_spec: common.TaskSpec, globals: dict[str, Any], *, number: int, repeats: int, collect_baseline: bool, is_python: bool, retain_out_file: bool) -> tuple[CallgrindStats, ...]:\n self._validate()\n assert is_python or not collect_baseline\n *task_stats, baseline_stats = self._invoke(task_spec=task_spec, globals=globals, number=number, repeats=repeats, collect_baseline=collect_baseline, is_python=is_python, retain_out_file=retain_out_file)\n assert len(task_stats) == repeats\n return tuple((CallgrindStats(task_spec=task_spec, number_per_run=number, built_with_debug_symbols=self._build_type == 'RelWithDebInfo', baseline_inclusive_stats=baseline_stats[0], baseline_exclusive_stats=baseline_stats[1], stmt_inclusive_stats=stmt_inclusive_stats, stmt_exclusive_stats=stmt_exclusive_stats, stmt_callgrind_out=out_contents) for stmt_inclusive_stats, stmt_exclusive_stats, out_contents in task_stats))", + "docstring": "Collect stats, and attach a reference run which can be used to filter interpreter overhead.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py", + "ast_data": "FunctionDef name:collect_callgrind arg:self arg:task_spec arg:globals arguments arg arg arg arg arg arg arg arg Call BoolOp Assign Call Compare Call Return return:yes Call Call Compare" + }, + { + "library": "tensorflow", + "name": "_bucketized_column", + "source_code": "def _bucketized_column(source_column, boundaries):\n if not isinstance(source_column, _NumericColumn):\n raise ValueError('source_column must be a column generated with numeric_column(). Given: {}'.format(source_column))\n if len(source_column.shape) > 1:\n raise ValueError('source_column must be one-dimensional column. Given: {}'.format(source_column))\n if not boundaries or not (isinstance(boundaries, list) or isinstance(boundaries, tuple)):\n raise ValueError('boundaries must be a sorted list.')\n for i in range(len(boundaries) - 1):\n if boundaries[i] >= boundaries[i + 1]:\n raise ValueError('boundaries must be a sorted list.')\n return _BucketizedColumn(source_column, tuple(boundaries))", + "docstring": "Represents discretized dense input. Buckets include the left boundary, and exclude the right boundary. Namely, generates buckets , , , and . For example, if the inputs are then the output will be Example: A can also be crossed with another categorical column using : Args: source_column: A one-dimensional dense column which is generated with . boundaries: A sorted list or tuple of floats specifying the boundaries. Returns: A . Raises: ValueError: If is not a numeric column, or if it is not one-dimensional. 
ValueError: If is not a sorted list or tuple.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_bucketized_column arg:source_column arg:boundaries arguments arg arg If Call Raise Call Call If Compare Call Raise Call Call If BoolOp BoolOp Call Call Raise Call For Call Call If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "cumsum", + "source_code": "@tf_export('math.cumsum', 'cumsum')\n@dispatch.add_dispatch_support\ndef cumsum(x, axis=0, exclusive=False, reverse=False, name=None):\n with ops.name_scope(name, 'Cumsum', [x]) as name:\n x = ops.convert_to_tensor(x, name='x')\n return gen_math_ops.cumsum(x, axis, exclusive=exclusive, reverse=reverse, name=name)", + "docstring": "Compute the cumulative sum of the tensor along . By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output: For example: >>> # tf.cumsum([a, b, c]) # [a, a + b, a + b + c] >>> x = tf.constant([2, 4, 6, 8]) >>> tf.cumsum(x) >>> # using varying values >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]]) >>> tf.cumsum(y, axis=0) >>> tf.cumsum(y, axis=1) By setting the kwarg to , an exclusive cumsum is performed instead: >>> # tf.cumsum([a, b, c], exclusive=True) => [0, a, a + b] >>> x = tf.constant([2, 4, 6, 8]) >>> tf.cumsum(x, exclusive=True) By setting the kwarg to , the cumsum is performed in the opposite direction: >>> # tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c] >>> x = tf.constant([2, 4, 6, 8]) >>> tf.cumsum(x, reverse=True) This is more efficient than using separate ops. The and kwargs can also be combined: >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0] >>> x = tf.constant([2, 4, 6, 8]) >>> tf.cumsum(x, exclusive=True, reverse=True) Args: x: A . Must be one of the following types: , , , , , , , , , , , , , . axis: A of type (default: 0). Must be in the range . exclusive: If , perform exclusive cumsum. reverse: A (default: False). name: A name for the operation (optional). Returns: A . Has the same type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:cumsum arg:x arg:axis arg:exclusive arg:reverse arg:name arguments arg arg arg arg arg With Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "initialize", + "source_code": "def initialize(self):\n if ops.executing_eagerly_outside_functions():\n self._iterator._eager_reset()\n return []\n else:\n return [self._iterator.initializer]", + "docstring": "Initialize underlying iterator. In eager execution, this simply recreates the underlying iterator. In graph execution, it returns the initializer ops for the underlying iterator. 
Returns: A list of any initializer ops that should be run.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py", + "ast_data": "FunctionDef name:initialize arg:self arguments arg If Call Call Return return:no Return return:yes" + }, + { + "library": "scipy", + "name": "Penalty02", + "source_code": "class Penalty02(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-50.0] * self.N, [50.0] * self.N))\n self.custom_bounds = ([-4.0, 4.0], [-4.0, 4.0])\n self.global_optimum = [[1.0 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n a, b, c = (5.0, 100.0, 4.0)\n xx = abs(x)\n u = where(xx > a, b * (xx - a) ** c, 0.0)\n return sum(u) + 0.1 * (10 * sin(3.0 * pi * x[0]) ** 2.0 + sum((x[:-1] - 1.0) ** 2.0 * (1.0 + sin(3 * pi * x[1:]) ** 2.0)) + (x[-1] - 1) ** 2.0 * (1 + sin(2 * pi * x[-1]) ** 2.0))", + "docstring": "Penalty 2 objective function. This class defines the Penalty 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Penalty02}}(x) = 0.1 \\left\\{\\sin^2(3\\pi x_1) + \\sum_{i=1}^{n-1} (x_i - 1)^2 \\left[1 + \\sin^2(3\\pi x_{i+1}) \\right ] + (x_n - 1)^2 \\left [1 + \\sin^2(2 \\pi x_n) \\right ]\\right \\} + \\sum_{i=1}^n u(x_i, 5, 100, 4) Where, in this exercise: .. math:: u(x_i, a, k, m) = \\begin{cases} k(x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i > a \\\\ 0 & \\textrm{if} \\hspace{5pt} -a \\leq x_i \\leq a \\\\ k(-x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i < -a \\\\ \\end{cases} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. 
Global Optimization Benchmarks and AMPGO retrieved 2015", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py", + "ast_data": "ClassDef name:Penalty02 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Assign Call Compare Return return:yes Call Call Call Call Call" + }, + { + "library": "django", + "name": "convert_extent3d", + "source_code": "def convert_extent3d(self, box3d):\n if box3d is None:\n return None\n ll, ur = box3d[6:-1].split(',')\n xmin, ymin, zmin = map(float, ll.split())\n xmax, ymax, zmax = map(float, ur.split())\n return (xmin, ymin, zmin, xmax, ymax, zmax)", + "docstring": "Return a 6-tuple extent for the aggregate by converting the 3d bounding-box text returned by PostGIS ( argument), for example: \"BOX3D(-90.0 30.0 1, -85.0 40.0 2)\".", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py", + "ast_data": "FunctionDef name:convert_extent3d arg:self arg:box3d arguments arg arg If Compare Return return:no Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "looper", + "source_code": "class looper:\n\n def __init__(self, seq):\n self.seq = seq\n\n def __iter__(self):\n return looper_iter(self.seq)\n\n def __repr__(self):\n return '<%s for %r>' % (self.__class__.__name__, self.seq)", + "docstring": "Helper for looping (particularly in templates) Use this like:: for loop, item in looper(seq): if loop.first: ...", + "type": "class", + "file_path": "numpy\\numpy\\_build_utils\\tempita\\_looper.py", + "ast_data": "ClassDef name:looper FunctionDef name:__init__ arg:self arg:seq arguments arg arg Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "cherrypy", + "name": "find_config", + "source_code": "def find_config(self, path, key, default=None):\n trail = path or '/'\n while trail:\n nodeconf = self.config.get(trail, {})\n if key in nodeconf:\n return nodeconf[key]\n lastslash = trail.rfind('/')\n if lastslash == -1:\n break\n elif lastslash == 0 and trail != '/':\n trail = '/'\n else:\n trail = trail[:lastslash]\n return default", + "docstring": "Return the most-specific value for key along path, or default.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptree.py", + "ast_data": "FunctionDef name:find_config arg:self arg:path arg:key arg:default arguments arg arg arg arg Assign BoolOp While Assign Call If Compare Return return:yes Assign Call If Compare If BoolOp Compare Compare Assign Assign Return return:yes" + }, + { + "library": "kornia", + "name": "so2", + "source_code": "@property\ndef so2(self) -> So2:\n return self._rotation", + "docstring": "Return the underlying .", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", + "ast_data": "FunctionDef name:so2 arg:self arguments arg Return return:yes" + }, + { + "library": "cherrypy", + "name": "__setattr__", + "source_code": "def __setattr__(self, name, value):\n if isinstance(value, Tool):\n if value._name is None:\n value._name = name\n value.namespace = self.namespace\n object.__setattr__(self, name, value)", + "docstring": "Set an attribute on this :class: instance.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptools.py", + "ast_data": "FunctionDef 
name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Call If Compare Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "update_regroup", + "source_code": "def update_regroup(extended, updates, group):\n if not group:\n regrouped = regroup(updates, values_lib.Mirrored)\n return nest.map_structure(extended._local_results, regrouped)\n\n def _make_grouped_mirrored(values):\n if len(values) == 1:\n return values_lib.Mirrored(values)\n g = control_flow_ops.group(values)\n if not all((tensor_util.is_tf_type(v) for v in values)):\n return g\n with_dep = []\n for v in values:\n with ops.device(v.device), ops.control_dependencies([g]):\n with_dep.append(array_ops.identity(v))\n return values_lib.Mirrored(with_dep)\n return regroup(updates, _make_grouped_mirrored)", + "docstring": "Regroup for an update, with dependencies to ensure all updates execute.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py", + "ast_data": "FunctionDef name:update_regroup arg:extended arg:updates arg:group arguments arg arg arg If Assign Call Return return:yes Call FunctionDef name:_make_grouped_mirrored arg:values arguments arg If Compare Call Return return:yes Call Assign Call If Call Call Return return:yes Assign For With Call Call Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "fetch", + "source_code": "def fetch(self):\n raise NotImplementedError('Must be implemented in subclasses.')", + "docstring": "Wait for the result of and return the numpy result. This makes the value concrete by copying the remote value to local. Returns: The numpy array structure of the actual output of the associated with this , previously returned by a call. This can be a single value, or a structure of values, depending on the output of the . Raises: tf.errors.CancelledError: If the function that produces this is aborted or cancelled due to failure.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\remote_value.py", + "ast_data": "FunctionDef name:fetch arg:self arguments arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "inverse_transform", + "source_code": "def inverse_transform(self, X):\n if self.validate:\n X = check_array(X, accept_sparse=self.accept_sparse)\n return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)", + "docstring": "Transform X using the inverse function. Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) if else any object that can handle Input array. Returns ------- X_original : array-like, shape (n_samples, n_features) Transformed input.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_function_transformer.py", + "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg If Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "add_object_type", + "source_code": "def add_object_type(self, directivename: str, rolename: str, indextemplate: str='', parse_node: Callable[[BuildEnvironment, str, addnodes.desc_signature], str] | None=None, ref_nodeclass: type[nodes.TextElement] | None=None, objname: str='', doc_field_types: Sequence[Field]=(), override: bool=False) -> None:\n self.registry.add_object_type(directivename, rolename, indextemplate, parse_node, ref_nodeclass, objname, doc_field_types, override=override)", + "docstring": "Register a new object type. 
This method is a very convenient way to add a new :term: type that can be cross-referenced. It will do this: - Create a new directive (called *directivename*) for documenting an object. It will automatically add index entries if *indextemplate* is nonempty; if given, it must contain exactly one instance of `conf.pyfunctionxref-syntax`). If *override* is True, the given object_type is forcedly installed even if an object_type having the same name is already installed. .. versionchanged:: 1.8 Add *override* keyword.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:add_object_type arg:self arg:directivename arg:rolename arg:indextemplate arg:parse_node arg:ref_nodeclass arg:objname arg:doc_field_types arg:override arguments arg arg arg arg arg arg arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "terminate_all", + "source_code": "def terminate_all(self, sig=None):\n with self._process_lock:\n self._terminate_all(sig)", + "docstring": "Terminates all subprocesses.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py", + "ast_data": "FunctionDef name:terminate_all arg:self arg:sig arguments arg arg With Call" + }, + { + "library": "numpy", + "name": "shares_memory", + "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)\ndef shares_memory(a, b, max_work=None):\n return (a, b)", + "docstring": "shares_memory(a, b, /, max_work=None) Determine if two arrays share memory. .. warning:: This function can be exponentially slow for some inputs, unless is set to zero or a positive integer. If in doubt, use instead. Parameters ---------- a, b : ndarray Input arrays max_work : int, optional Effort to spend on solving the overlap problem (maximum number of candidate solutions to consider). The following special values are recognized: max_work=-1 (default) The problem is solved exactly. In this case, the function returns True only if there is an element shared between the arrays. Finding the exact solution may take extremely long in some cases. max_work=0 Only the memory bounds of a and b are checked. This is equivalent to using `max_workmax_work` set takes around 1 minute for this case. It is possible to find problems that take still significantly longer.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\multiarray.py", + "ast_data": "FunctionDef name:shares_memory arg:a arg:b arg:max_work arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "register", + "source_code": "def register(self, name):\n\n def wrapper(writer_cls):\n self._registered[name] = writer_cls\n return writer_cls\n return wrapper", + "docstring": "Decorator for registering a class under a name. Example use:: @registry.register(name) class Foo: pass", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:register arg:self arg:name arguments arg arg FunctionDef name:wrapper arg:writer_cls arguments arg Assign Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "polar", + "source_code": "def polar(*args, **kwargs) -> list[Line2D]:\n if gcf().get_axes():\n ax = gca()\n if not isinstance(ax, PolarAxes):\n _api.warn_deprecated('3.10', message=\"There exists a non-polar current Axes. Therefore, the resulting plot from 'polar()' is non-polar. You likely should call 'polar()' before any other pyplot plotting commands. 
Support for this scenario is deprecated in %(since)s and will raise an error in %(removal)s\")\n else:\n ax = axes(projection='polar')\n return ax.plot(*args, **kwargs)", + "docstring": "Make a polar plot. call signature:: polar(theta, r, [fmt], **kwargs) This is a convenience wrapper around . It ensures that the current Axes is polar (or creates one if needed) and then passes all parameters to `pyplot API ` call will fail.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:polar arguments arg arg If Call Call Assign Call If Call Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_is_scalar_access", + "source_code": "def _is_scalar_access(self, key: tuple) -> bool:\n if len(key) != self.ndim:\n return False\n return all((is_integer(k) for k in key))", + "docstring": "Returns ------- bool", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:_is_scalar_access arg:self arg:key arguments arg arg If Compare Call Return return:yes Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "rgb255_to_normals", + "source_code": "def rgb255_to_normals(image: Tensor) -> Tensor:\n KORNIA_CHECK_IS_COLOR(image)\n normals = normalize(image / 255.0 * 2.0 - 1.0, dim=-3, p=2.0)\n return normals", + "docstring": "Convert an image from RGB [0, 255] to surface normals for visualization purposes. Args: image: RGB Image to be converted to surface normals of shape :math:. Returns: surface normals version of the image with shape of shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb255_to_normals(input) # 2x3x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\rgb.py", + "ast_data": "FunctionDef name:rgb255_to_normals arg:image arguments arg Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_ticks", + "source_code": "def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs):\n if labels is None and kwargs:\n first_key = next(iter(kwargs))\n raise ValueError(f\"Incorrect use of keyword argument {first_key!r}. Keyword arguments other than 'minor' modify the text labels and can only be used if 'labels' are passed as well.\")\n result = self._set_tick_locations(ticks, minor=minor)\n if labels is not None:\n self.set_ticklabels(labels, minor=minor, **kwargs)\n return result", + "docstring": "Set this Axis' tick locations and optionally tick labels. If necessary, the view limits of the Axis are expanded so that all given ticks are visible. Parameters ---------- ticks : 1D array-like Array of tick locations (either floats or in axis units). The axis is replaced by a . Pass an empty list (`.Axis.set_major_formatter.FixedFormatter.Formatter.Text~.Axes.tick_params`. Notes ----- The mandatory expansion of the view limits is an intentional design choice to prevent the surprise of a non-visible tick. If you need other limits, you should set the limits explicitly after setting the ticks.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:set_ticks arg:self arg:ticks arg:labels arguments arg arg arg arg arg If BoolOp Compare Assign Call Call Raise Call Assign Call If Compare Call Return return:yes" + }, + { + "library": "pandas", + "name": "isna", + "source_code": "@doc(klass=_shared_doc_kwargs['klass'])\ndef isna(self) -> Self:\n return isna(self).__finalize__(self, method='isna')", + "docstring": "Detect missing values. 
Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings `numpy.inf` are not considered NA values. Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is an NA value. See Also -------- {klass}.isnull : Alias of isna. {klass}.notna : Boolean inverse of isna. {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame( ... dict( ... age=[5, 6, np.nan], ... born=[ ... pd.NaT, ... pd.Timestamp(\"1939-05-27\"), ... pd.Timestamp(\"1940-04-25\"), ... ], ... name=[\"Alfred\", \"Batman\", \"\"], ... toy=[None, \"Batmobile\", \"Joker\"], ... ) ... ) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.nan]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:isna arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "StrictlyAboveLookup", + "source_code": "@BaseSpatialField.register_lookup\nclass StrictlyAboveLookup(GISLookup):\n lookup_name = 'strictly_above'", + "docstring": "The 'strictly_above' operator returns true if A's bounding box is strictly above B's bounding box.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py", + "ast_data": "ClassDef name:StrictlyAboveLookup Assign" + }, + { + "library": "authlib", + "name": "create_revocation_endpoint", + "source_code": "def create_revocation_endpoint(session, token_model):\n from authlib.oauth2.rfc7009 import RevocationEndpoint\n query_token = create_query_token_func(session, token_model)\n\n class _RevocationEndpoint(RevocationEndpoint):\n\n def query_token(self, token, token_type_hint):\n return query_token(token, token_type_hint)\n\n def revoke_token(self, token, request):\n now = int(time.time())\n hint = request.form.get('token_type_hint')\n token.access_token_revoked_at = now\n if hint != 'access_token':\n token.refresh_token_revoked_at = now\n session.add(token)\n session.commit()\n return _RevocationEndpoint", + "docstring": "Create a revocation endpoint class with SQLAlchemy session and token model. :param session: SQLAlchemy session :param token_model: Token model class", + "type": "function", + "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\functions.py", + "ast_data": "FunctionDef name:create_revocation_endpoint arg:session arg:token_model arguments arg arg Assign Call ClassDef name:_RevocationEndpoint FunctionDef name:query_token arg:self arg:token arg:token_type_hint arguments arg arg arg Return return:yes Call FunctionDef name:revoke_token arg:self arg:token arg:request arguments arg arg arg Assign Call Call Assign Call Assign If Compare Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "allow_nan_stats", + "source_code": "@property\ndef allow_nan_stats(self):\n return self._allow_nan_stats", + "docstring": "Python describing behavior when a stat is undefined. Stats return +/- infinity when it makes sense. 
E.g., the variance of a Cauchy distribution is infinity. However, sometimes the statistic is undefined, e.g., if a distribution's pdf does not achieve a maximum within the support of the distribution, the mode is undefined. If the mean is undefined, then by definition the variance is undefined. E.g. the mean for Student's T for df = 1 is undefined (no clear way to say it is either + or - infinity), so the variance = E[(X - mean)**2] is also undefined. Returns: allow_nan_stats: Python .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", + "ast_data": "FunctionDef name:allow_nan_stats arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "are_long_distant_nodes", + "source_code": "def are_long_distant_nodes(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> bool:\n proximity_score = max(abs(node1.min_order - node2.max_order), abs(node2.min_order - node1.max_order))\n return proximity_score > 64", + "docstring": "This function prevents fusion for nodes that can increase memory footprint. This problem is more common in horizontal fusion, where nodes that are far apart in the original order get fused, lengthening the live intervals of tensors. This is very evident in models with activation checkpointing, where the recomputed nodes from different checkpointed regions get fused and significantly increase the memory footprint. The current attempt is a quick, possibly hacky, heuristic to prevent the fusion of nodes that are far away in the original order. A better but difficult to implement heurisitic would be to use live intervals of the buffers, find region of peak pressure in the original program and prevent fusion that crosses that peak region. We might need special care or good approximation in this implementation, as fusion of node changes live intervals, and re-computing live intervals and peak memory after each fusion can introduce large compilation overhead.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:are_long_distant_nodes arg:self arg:node1 arg:node2 arguments arg arg arg Assign Call Call Call Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):\n if not context.executing_eagerly() and ops.get_default_graph()._get_control_flow_context() is not None:\n with ops.init_scope():\n self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name='keys')\n self._values = ops.convert_to_tensor(values, dtype=value_dtype, name='values')\n else:\n self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name='keys')\n self._values = ops.convert_to_tensor(values, dtype=value_dtype, name='values')\n self._name = name if name is not None else 'key_value_init'\n if context.executing_eagerly():\n self._name += str(ops.uid())\n super(KeyValueTensorInitializer, self).__init__(self._keys.dtype, self._values.dtype)", + "docstring": "Constructs a table initializer object based on keys and values tensors. Args: keys: The tensor for the keys. values: The tensor for the values. key_dtype: The data type. Used when is a python array. value_dtype: The data type. Used when is a python array. 
name: A name for the operation (optional).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:keys arg:values arg:key_dtype arg:value_dtype arg:name arguments arg arg arg arg arg arg If BoolOp Call Compare Call Call With Call Assign Call Assign Call Assign Call Assign Call Assign Compare If Call Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "barycenter_kneighbors_graph", + "source_code": "def barycenter_kneighbors_graph(X, n_neighbors, reg=0.001, n_jobs=None):\n knn = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs).fit(X)\n X = knn._fit_X\n n_samples = knn.n_samples_fit_\n ind = knn.kneighbors(X, return_distance=False)[:, 1:]\n data = barycenter_weights(X, X, ind, reg=reg)\n indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)\n return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples))", + "docstring": "Computes the barycenter weighted graph of k-Neighbors for points in X Parameters ---------- X : {array-like, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array or a NearestNeighbors object. n_neighbors : int Number of neighbors for each sample. reg : float, default=1e-3 Amount of regularization when solving the least-squares problem. Only relevant if mode='barycenter'. If None, use the default. n_jobs : int or None, default=None The number of parallel jobs to run for neighbors search. `joblib.parallel_backendGlossary ` for more details. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. See Also -------- sklearn.neighbors.kneighbors_graph sklearn.neighbors.radius_neighbors_graph", + "type": "function", + "file_path": "scikit-learn\\sklearn\\manifold\\_locally_linear.py", + "ast_data": "FunctionDef name:barycenter_kneighbors_graph arg:X arg:n_neighbors arg:reg arg:n_jobs arguments arg arg arg arg Assign Call Call Assign Assign Assign Call Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "from_file", + "source_code": "@classmethod\ndef from_file(cls, filename, shared, size):\n _warn_typed_storage_removal()\n if cls == TypedStorage:\n raise RuntimeError('from_file can only be called on derived classes')\n untyped_storage = UntypedStorage.from_file(filename, shared, size * torch._utils._element_size(cls.dtype))\n storage = cls(wrap_storage=untyped_storage)\n return storage", + "docstring": "from_file(filename, shared=False, size=0) -> Storage Creates a CPU storage backed by a memory-mapped file. 
If `mmap(2) call `_) size (int): number of elements in the storage", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:from_file arg:cls arg:filename arg:shared arg:size arguments arg arg arg arg Call If Compare Raise Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "parse_example_spec", + "source_code": "@property\ndef parse_example_spec(self):\n return self.categorical_column.parse_example_spec", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "not_", + "source_code": "def not_(a):\n if tensor_util.is_tf_type(a):\n return _tf_not(a)\n return _py_not(a)", + "docstring": "Functional form of \"not\".", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py", + "ast_data": "FunctionDef name:not_ arg:a arguments arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "comb", + "source_code": "def comb(N, k, *, exact=False, repetition=False):\n if repetition:\n return comb(N + k - 1, k, exact=exact)\n if exact:\n if int(N) == N and int(k) == k:\n return _comb_int(N, k)\n else:\n raise ValueError('Non-integer `N` and `k` with `exact=True` is not supported.')\n else:\n k, N = (asarray(k), asarray(N))\n cond = (k <= N) & (N >= 0) & (k >= 0)\n vals = binom(N, k)\n if isinstance(vals, np.ndarray):\n vals[~cond] = 0\n elif not cond:\n vals = np.float64(0)\n return vals", + "docstring": "The number of combinations of N things taken k at a time. This is often expressed as \"N choose k\". Parameters ---------- N : int, ndarray Number of things. k : int, ndarray Number of elements taken. exact : bool, optional For integers, if is False, then floating point precision is used, otherwise the result is computed exactly. repetition : bool, optional If is True, then the number of combinations with repetition is computed. Returns ------- val : int, float, ndarray The total number of combinations. See Also -------- binom : Binomial coefficient considered as a function of two real variables. Notes ----- - Array arguments accepted only for exact=False case. - If N N and repetition=False, then 0 is returned. 
Examples -------- >>> import numpy as np >>> from scipy.special import comb >>> k = np.array([3, 4]) >>> n = np.array([10, 10]) >>> comb(n, k, exact=False) array([ 120., 210.]) >>> comb(10, 3, exact=True) 120 >>> comb(10, 3, exact=True, repetition=True) 220", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:comb arg:N arg:k arguments arg arg arg arg If Return return:yes Call If If BoolOp Compare Call Compare Call Return return:yes Call Raise Call Assign Call Call Assign Compare Compare Compare Assign Call If Call Assign If Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "add_opset_imports", + "source_code": "def add_opset_imports(model: ir.Model) -> None:\n for node in ir.traversal.RecursiveGraphIterator(model.graph):\n domain = node.domain\n _maybe_set_opset_version(model.opset_imports, domain, node.version)\n for function in model.functions.values():\n for node in ir.traversal.RecursiveGraphIterator(function):\n domain = node.domain\n _maybe_set_opset_version(function.opset_imports, domain, node.version)\n for domain, version in function.opset_imports.items():\n _maybe_set_opset_version(model.opset_imports, domain, version)", + "docstring": "Collect all opsets used and add opset imports to the model and functions.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_ir_passes.py", + "ast_data": "FunctionDef name:add_opset_imports arg:model arguments arg For Call Assign Call For Call For Call Assign Call For Call Call" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, x=None, y=None, z=None, srid=None):\n if x is None:\n coords = []\n elif isinstance(x, (tuple, list)):\n coords = x\n elif isinstance(x, (float, int)) and isinstance(y, (float, int)):\n if isinstance(z, (float, int)):\n coords = [x, y, z]\n else:\n coords = [x, y]\n else:\n raise TypeError('Invalid parameters given for Point initialization.')\n point = self._create_point(len(coords), coords)\n super().__init__(point, srid=srid)", + "docstring": "The Point object may be initialized with either a tuple, or individual parameters. For example: >>> p = Point((5, 23)) # 2D point, passed in as a tuple >>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\point.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:y arg:z arg:srid arguments arg arg arg arg arg If Compare Assign If Call Assign If BoolOp Call Call If Call Assign Assign Raise Call Assign Call Call Call Call" + }, + { + "library": "scipy", + "name": "minkowski", + "source_code": "def minkowski(u, v, p=2, w=None):\n u = _validate_vector(u)\n v = _validate_vector(v)\n if p <= 0:\n raise ValueError('p must be greater than 0')\n u_v = u - v\n if w is not None:\n w = _validate_weights(w)\n if p == 1:\n root_w = w\n elif p == 2:\n root_w = np.sqrt(w)\n elif p == np.inf:\n root_w = w != 0\n else:\n root_w = np.power(w, 1 / p)\n u_v = root_w * u_v\n dist = norm(u_v, ord=p)\n return dist", + "docstring": "Compute the Minkowski distance between two 1-D arrays. The Minkowski distance between 1-D arrays and , is defined as .. math:: {\\|u-v\\|}_p = (\\sum{|u_i - v_i|^p})^{1/p}. \\left(\\sum{w_i(|(u_i - v_i)|^p)}\\right)^{1/p}. Parameters ---------- u : (N,) array_like Input array. v : (N,) array_like Input array. p : scalar The order of the norm of the difference :math:. 
Note that for :math:`0 >> from scipy.spatial import distance >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1) 2.0 >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2) 1.4142135623730951 >>> distance.minkowski([1, 0, 0], [0, 1, 0], 3) 1.2599210498948732 >>> distance.minkowski([1, 1, 0], [0, 1, 0], 1) 1.0 >>> distance.minkowski([1, 1, 0], [0, 1, 0], 2) 1.0 >>> distance.minkowski([1, 1, 0], [0, 1, 0], 3) 1.0", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\distance.py", + "ast_data": "FunctionDef name:minkowski arg:u arg:v arg:p arg:w arguments arg arg arg arg Assign Call Assign Call If Compare Raise Call Assign If Compare Assign Call If Compare Assign If Compare Assign Call If Compare Assign Compare Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_accumulate_sharded_grad", + "source_code": "@no_type_check\ndef _accumulate_sharded_grad(state: _FSDPState, handle: FlatParamHandle, sharded_grad: torch.Tensor) -> torch.Tensor:\n flat_param = handle.flat_param\n _cast_grad_to_param_dtype(state, sharded_grad, flat_param)\n accumulate_grad = hasattr(flat_param, '_saved_grad_shard')\n if accumulate_grad:\n _check_grad_to_accumulate(sharded_grad, flat_param._saved_grad_shard)\n flat_param._saved_grad_shard += sharded_grad\n else:\n flat_param._saved_grad_shard = sharded_grad\n grad_to_offload = flat_param._saved_grad_shard\n return grad_to_offload", + "docstring": "Accumulates the reduce-scattered sharded gradient with any existing sharded gradient if needed, returning the gradient to offload (if CPU offloading is enabled).", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_accumulate_sharded_grad arg:state arg:handle arg:sharded_grad arguments arg arg arg Assign Call Assign Call If Call Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_xlim", + "source_code": "def get_xlim(self):\n return tuple(self.viewLim.intervalx)", + "docstring": "Return the x-axis view limits. Returns ------- left, right : (float, float) The current x-axis limits in data coordinates. 
See Also -------- .Axes.set_xlim .Axes.set_xbound, .Axes.get_xbound .Axes.invert_xaxis, .Axes.xaxis_inverted Notes ----- The x-axis may be inverted, in which case the *left* value will be greater than the *right* value.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:get_xlim arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "log_json", + "source_code": "def log_json(self, stats: Any) -> None:\n print(stats)", + "docstring": "Logs the stats in json format to stdout.", + "type": "method", + "file_path": "pytorch\\tools\\stats\\monitor.py", + "ast_data": "FunctionDef name:log_json arg:self arg:stats arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "dispatch_torch_function", + "source_code": "def dispatch_torch_function(tx: 'InstructionTranslator', fn, args, kwargs):\n all_args = _get_all_args(args, kwargs)\n overloaded_args = _get_overloaded_args([arg for arg in all_args if has_torch_function(arg)], _get_subclass_type)\n types = TupleVariable([_get_subclass_type_var(tx, arg) for arg in overloaded_args])\n if tx.symbolic_torch_function_state.in_torch_function_mode():\n res = tx.symbolic_torch_function_state.call_torch_function_mode(tx, fn, types, args, kwargs)\n if not (isinstance(res, ConstantVariable) and res.value is NotImplemented):\n return res\n for arg in overloaded_args:\n res = arg.call_torch_function(tx, fn, types, args, kwargs)\n if not (isinstance(res, ConstantVariable) and res.value is NotImplemented):\n return res\n unimplemented_v2(gb_type='TypeError from user code', context=f'fn={fn!r}, args={args!r}, kwargs={kwargs!r}', explanation=f'All __torch_function__ overrides for for function {fn} returned NotImplemented', hints=[*graph_break_hints.USER_ERROR])", + "docstring": "Gathers all args that are TensorWithTFOverrideVariable and dispatches based on the ordering in _get_overloaded_args", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\variables\\torch_function.py", + "ast_data": "FunctionDef name:dispatch_torch_function arg:tx arg:fn arg:args arg:kwargs arguments arg arg arg arg Assign Call Assign Call Call Assign Call Call If Call Assign Call If BoolOp Call Compare Return return:yes For Assign Call If BoolOp Call Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "graph", + "source_code": "@property\ndef graph(self):\n if self._tf_sess() is None:\n return None\n return self._tf_sess().graph", + "docstring": "The graph that was launched in this session.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:graph arg:self arguments arg If Compare Call Return return:no Return return:yes Call" + }, + { + "library": "kornia", + "name": "RgbToYuv420", + "source_code": "class RgbToYuv420(Module):\n ONNX_EXPORTABLE = False\n\n def forward(self, yuvinput: Tensor) -> tuple[Tensor, Tensor]:\n return rgb_to_yuv420(yuvinput)", + "docstring": "Convert an image from RGB to YUV420. Width and Height evenly divisible by 2. The image data is assumed to be in the range of :math:. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Returns: YUV420 version of the image. 
Shape: - image: :math: - output: :math: and :math: Examples: >>> yuvinput = torch.rand(2, 3, 4, 6) >>> yuv = RgbToYuv420() >>> output = yuv(yuvinput) # # (2x1x4x6, 2x1x2x3) Reference:: [1]", + "type": "class", + "file_path": "kornia\\kornia\\color\\yuv.py", + "ast_data": "ClassDef name:RgbToYuv420 Assign FunctionDef name:forward arg:self arg:yuvinput arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "construct_fast", + "source_code": "@classmethod\ndef construct_fast(cls, t, c, k, extrapolate=True, axis=0):\n self = object.__new__(cls)\n self.t, self.c, self.k = (t, c, k)\n self.extrapolate = extrapolate\n self.axis = axis\n return self", + "docstring": "Construct a spline without making checks. Accepts same parameters as the regular constructor. Input arrays and must of correct shape and dtype.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_bsplines.py", + "ast_data": "FunctionDef name:construct_fast arg:cls arg:t arg:c arg:k arg:extrapolate arg:axis arguments arg arg arg arg arg arg Assign Call Assign Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "num_feat", + "source_code": "@property\ndef num_feat(self, force=1):\n return capi.get_feature_count(self.ptr, force)", + "docstring": "Return the number of features in the Layer.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py", + "ast_data": "FunctionDef name:num_feat arg:self arg:force arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "do_pending_operations", + "source_code": "def do_pending_operations(self, model):\n key = (model._meta.app_label, model._meta.model_name)\n for function in self._pending_operations.pop(key, []):\n function(model)", + "docstring": "Take a newly-prepared model and pass it to each function waiting for it. This is called at the very end of Apps.register_model().", + "type": "method", + "file_path": "django\\django\\apps\\registry.py", + "ast_data": "FunctionDef name:do_pending_operations arg:self arg:model arguments arg arg Assign For Call Call" + }, + { + "library": "kornia", + "name": "joint_bilateral_blur", + "source_code": "def joint_bilateral_blur(input: Tensor, guidance: Tensor, kernel_size: tuple[int, int] | int, sigma_color: float | Tensor, sigma_space: tuple[float, float] | Tensor, border_type: str='reflect', color_distance_type: str='l1') -> Tensor:\n return _bilateral_blur(input, guidance, kernel_size, sigma_color, sigma_space, border_type, color_distance_type)", + "docstring": "Blur a tensor using a Joint Bilateral filter. .. image:: _static/img/joint_bilateral_blur.png This operator is almost identical to a Bilateral filter. The only difference is that the color Gaussian kernel is computed based on another image called a guidance image. See :func: for more information. Arguments: input: the input tensor with shape :math:. guidance: the guidance tensor with shape :math:. kernel_size: the size of the kernel. sigma_color: the standard deviation for intensity/color Gaussian kernel. Smaller values preserve more edges. sigma_space: the standard deviation for spatial Gaussian kernel. This is similar to `gaussian_blur2d()(B, C, H, W)`. 
Examples: >>> input = torch.rand(2, 4, 5, 5) >>> guidance = torch.rand(2, 4, 5, 5) >>> output = joint_bilateral_blur(input, guidance, (3, 3), 0.1, (1.5, 1.5)) >>> output.shape torch.Size([2, 4, 5, 5])", + "type": "function", + "file_path": "kornia\\kornia\\filters\\bilateral.py", + "ast_data": "FunctionDef name:joint_bilateral_blur arg:input arg:guidance arg:kernel_size arg:sigma_color arg:sigma_space arg:border_type arg:color_distance_type arguments arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "idst", + "source_code": "def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):\n type = _inverse_typemap[type]\n return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)", + "docstring": "Return the Inverse Discrete Sine Transform of an arbitrary type sequence. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. n : int, optional Length of the transform. If `xxdst`. .. versionadded:: 0.11.0", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_realtransforms.py", + "ast_data": "FunctionDef name:idst arg:x arg:type arg:n arg:axis arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Assign Return return:yes Call" + }, + { + "library": "pygame", + "name": "as_machine_type", + "source_code": "def as_machine_type(size):\n if size == 32:\n return 'x86'\n if size == 64:\n return 'x64'\n raise ValueError('Unknown pointer size {}'.format(size))", + "docstring": "Return pointer bit size as a Windows machine type", + "type": "function", + "file_path": "pygame\\buildconfig\\config_win.py", + "ast_data": "FunctionDef name:as_machine_type arg:size arguments arg If Compare Return return:yes If Compare Return return:yes Raise Call Call" + }, + { + "library": "pytorch", + "name": "cross_product", + "source_code": "def cross_product(*inputs):\n return list(itertools.product(*inputs))", + "docstring": "Return a list of cartesian product of input iterables. 
For example, cross_product(A, B) returns ((x,y) for x in A for y in B).", + "type": "function", + "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py", + "ast_data": "FunctionDef name:cross_product arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "render", + "source_code": "def render(pieces, style):\n if pieces['error']:\n return {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None}\n if not style or style == 'default':\n style = 'pep440'\n if style == 'pep440':\n rendered = render_pep440(pieces)\n elif style == 'pep440-branch':\n rendered = render_pep440_branch(pieces)\n elif style == 'pep440-pre':\n rendered = render_pep440_pre(pieces)\n elif style == 'pep440-post':\n rendered = render_pep440_post(pieces)\n elif style == 'pep440-post-branch':\n rendered = render_pep440_post_branch(pieces)\n elif style == 'pep440-old':\n rendered = render_pep440_old(pieces)\n elif style == 'git-describe':\n rendered = render_git_describe(pieces)\n elif style == 'git-describe-long':\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(f\"unknown style '{style}'\")\n return {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')}", + "docstring": "Render the given version pieces into the requested style.", + "type": "function", + "file_path": "pandas\\pandas\\_version.py", + "ast_data": "FunctionDef name:render arg:pieces arg:style arguments arg arg If Return return:yes Call If BoolOp Compare Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes Call" + }, + { + "library": "cryptography", + "name": "public_bytes", + "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n pass", + "docstring": "Returns the key serialized as bytes.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py", + "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg" + }, + { + "library": "pytorch", + "name": "__eq__", + "source_code": "def __eq__(cls, other):\n return isvariadic(other) and set(cls.variadic_type) == set(other.variadic_type)", + "docstring": "Return True if other has the same variadic type Parameters ---------- other : object (type) The object (type) to check Returns ------- bool Whether or not is equal to", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\variadic.py", + "ast_data": "FunctionDef name:__eq__ arg:cls arg:other arguments arg arg Return return:yes BoolOp Call Compare Call Call" + }, + { + "library": "matplotlib", + "name": "_fill_between_x_or_y", + "source_code": "def _fill_between_x_or_y(self, ind_dir, ind, dep1, dep2=0, *, where=None, interpolate=False, step=None, **kwargs):\n dep_dir = mcoll.FillBetweenPolyCollection._f_dir_from_t(ind_dir)\n if not mpl.rcParams['_internal.classic_mode']:\n kwargs = cbook.normalize_kwargs(kwargs, mcoll.Collection)\n if not any((c in kwargs for c in ('color', 'facecolor'))):\n kwargs['facecolor'] = self._get_patches_for_fill.get_next_color()\n ind, dep1, dep2 = self._fill_between_process_units(ind_dir, dep_dir, ind, dep1, dep2, **kwargs)\n collection = 
mcoll.FillBetweenPolyCollection(ind_dir, ind, dep1, dep2, where=where, interpolate=interpolate, step=step, **kwargs)\n self.add_collection(collection)\n self._request_autoscale_view()\n return collection", + "docstring": "Fill the area between two {dir} curves. The curves are defined by the points (*{ind}*, *{dep}1*) and (*{ind}*, *{dep}2*). This creates one or multiple polygons describing the filled area. You may exclude some {dir} sections from filling using *where*. By default, the edges connect the given points directly. Use *step* if the filling should be a step function, i.e. constant in between *{ind}*. Parameters ---------- {ind} : array-like The {ind} coordinates of the nodes defining the curves. {dep}1 : array-like or float The {dep} coordinates of the nodes defining the first curve. {dep}2 : array-like or float, default: 0 The {dep} coordinates of the nodes defining the second curve. where : array-like of bool, optional Define *where* to exclude some {dir} regions from being filled. The filled regions are defined by the coordinates `.FillBetweenPolyCollection.FillBetweenPolyCollection.FillBetweenPolyCollection.Polygon` properties: %(FillBetweenPolyCollection:kwdoc)s See Also -------- fill_between : Fill between two sets of y-values. fill_betweenx : Fill between two sets of x-values.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:_fill_between_x_or_y arg:self arg:ind_dir arg:ind arg:dep1 arg:dep2 arguments arg arg arg arg arg arg arg arg arg Assign Call If Assign Call If Call Compare Assign Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "reconstruct_from_patches_2d", + "source_code": "@validate_params({'patches': [np.ndarray], 'image_size': [tuple, Hidden(list)]}, prefer_skip_nested_validation=True)\ndef reconstruct_from_patches_2d(patches, image_size):\n i_h, i_w = image_size[:2]\n p_h, p_w = patches.shape[1:3]\n img = np.zeros(image_size)\n n_h = i_h - p_h + 1\n n_w = i_w - p_w + 1\n for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):\n img[i:i + p_h, j:j + p_w] += p\n for i in range(i_h):\n for j in range(i_w):\n img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j))\n return img", + "docstring": "Reconstruct the image from all of its patches. Patches are assumed to overlap and the image is constructed by filling in the patches from left to right, top to bottom, averaging the overlapping regions. Read more in the :ref:. Parameters ---------- patches : ndarray of shape (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels) The complete set of patches. If the patches contain colour information, channels are indexed along the last dimension: RGB patches would have . image_size : tuple of int (image_height, image_width) or (image_height, image_width, n_channels) The size of the image that will be reconstructed. Returns ------- image : ndarray of shape image_size The reconstructed image. Examples -------- >>> from sklearn.datasets import load_sample_image >>> from sklearn.feature_extraction import image >>> one_image = load_sample_image(\"china.jpg\") >>> print('Image shape: {}'.format(one_image.shape)) Image shape: (427, 640, 3) >>> image_patches = image.extract_patches_2d(image=one_image, patch_size=(10, 10)) >>> print('Patches shape: {}'.format(image_patches.shape)) Patches shape: (263758, 10, 10, 3) >>> image_reconstructed = image.reconstruct_from_patches_2d( ... 
patches=image_patches, ... image_size=one_image.shape ... ) >>> print(f\"Reconstructed shape: {image_reconstructed.shape}\") Reconstructed shape: (427, 640, 3)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\image.py", + "ast_data": "FunctionDef name:reconstruct_from_patches_2d arg:patches arg:image_size arguments arg arg Assign Assign Assign Call Assign Assign For Call Call Call Call For Call For Call Call Call Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "num_coords", + "source_code": "@property\ndef num_coords(self):\n return capi.get_num_coords(self.ptr)", + "docstring": "Return the number of coordinates in the Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:num_coords arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "build_2d_sincos_pos_emb", + "source_code": "@staticmethod\ndef build_2d_sincos_pos_emb(w: int, h: int, embed_dim: int, temp: float=10000.0, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n xs = torch.arange(w, device=device, dtype=dtype)\n ys = torch.arange(h, device=device, dtype=dtype)\n grid_x, grid_y = torch_meshgrid([xs, ys], indexing='ij')\n pos_dim = embed_dim // 4\n omega = torch.arange(pos_dim, device=device, dtype=dtype) / pos_dim\n omega = 1.0 / temp ** omega\n out_x = grid_x.reshape(-1, 1) * omega.view(1, -1)\n out_y = grid_y.reshape(-1, 1) * omega.view(1, -1)\n pos_emb = concatenate([out_x.sin(), out_x.cos(), out_y.sin(), out_y.cos()], 1)\n return pos_emb.unsqueeze(1)", + "docstring": "Construct 2D sin-cos positional embeddings. Args: w: width of the image or feature map h: height of the image or feature map embed_dim: embedding dimension temp: temperature coefficient device: device to place the positional embeddings dtype: data type of the positional embeddings Returns: positional embeddings, shape :math:", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\architecture\\hybrid_encoder.py", + "ast_data": "FunctionDef name:build_2d_sincos_pos_emb arg:w arg:h arg:embed_dim arg:temp arg:device arg:dtype arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Assign Call Assign Assign Call Call Assign Call Call Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "constexpr_next_power_of_2", + "source_code": "@triton_builtin\ndef constexpr_next_power_of_2(n: tl.constexpr, *, _builder: object=None) -> tl.constexpr:\n assert isinstance(n, tl.constexpr)\n return tl.constexpr(triton.next_power_of_2(n.value))", + "docstring": "A version triton.next_power_of_two that can be used within a kernel on constants.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_helpers.py", + "ast_data": "FunctionDef name:constexpr_next_power_of_2 arg:n arguments arg arg Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "chi2_contingency", + "source_code": "def chi2_contingency(observed, correction=True, lambda_=None, *, method=None):\n observed = np.asarray(observed)\n if np.any(observed < 0):\n raise ValueError('All values in `observed` must be nonnegative.')\n if observed.size == 0:\n raise ValueError('No data; `observed` has size 0.')\n expected = expected_freq(observed)\n if np.any(expected == 0):\n zeropos = list(zip(*np.nonzero(expected == 0)))[0]\n raise ValueError(f'The internally computed table of expected 
frequencies has a zero element at {zeropos}.')\n if method is not None:\n return _chi2_resampling_methods(observed, expected, correction, lambda_, method)\n dof = expected.size - sum(expected.shape) + expected.ndim - 1\n if dof == 0:\n chi2 = 0.0\n p = 1.0\n else:\n if dof == 1 and correction:\n diff = expected - observed\n direction = np.sign(diff)\n magnitude = np.minimum(0.5, np.abs(diff))\n observed = observed + magnitude * direction\n chi2, p = power_divergence(observed, expected, ddof=observed.size - 1 - dof, axis=None, lambda_=lambda_)\n return Chi2ContingencyResult(chi2, p, dof, expected)", + "docstring": "Chi-square test of independence of variables in a contingency table. This function computes the chi-square statistic and p-value for the hypothesis test of independence of the observed frequencies in the contingency table [1]_ . The expected frequencies are computed based on the marginal sums under the assumption of independence; see . The number of degrees of freedom is (expressed using numpy functions and attributes):: dof = observed.size - sum(observed.shape) + observed.ndim - 1 Parameters ---------- observed : array_like The contingency table. The table contains the observed frequencies (i.e. number of occurrences) in each category. In the two-dimensional case, the table is often described as an \"R x C table\". correction : bool, optional If True, *and* the degrees of freedom is 1, apply Yates' correction for continuity. The effect of the correction is to adjust each observed value by 0.5 towards the corresponding expected value. lambda_ : float or str, optional By default, the statistic computed in this test is Pearson's chi-squared statistic [2]_. allows a statistic from the Cressie-Read power divergence family [3]_ to be used instead. See for details. method : ResamplingMethod, optional Defines the method used to compute the p-value. Compatible only with , default , and two-way tables. If is an instance of /, the p-value is computed using / with the provided configuration options and other appropriate settings. Otherwise, the p-value is computed as documented in the notes. Note that if is an instance of , the `scipy.stats.random_tablemethodobservedhypothesis_chi2_contingencyobservedexpectedobservedscipy.stats.chisquarescipy.stats.chisquarelambda_PermutationMethodMonteCarloMethodmethodcorrection=Falsehypothesis_chi2_contingency`.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\contingency.py", + "ast_data": "FunctionDef name:chi2_contingency arg:observed arg:correction arg:lambda_ arguments arg arg arg arg Assign Call If Call Compare Raise Call If Compare Raise Call Assign Call If Call Compare Assign Call Call Call Compare Raise Call If Compare Return return:yes Call Assign Call If Compare Assign Assign If BoolOp Compare Assign Assign Call Assign Call Call Assign Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_rewrite_assign", + "source_code": "def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]:\n toknum, tokval = tok\n return (toknum, '==' if tokval == '=' else tokval)", + "docstring": "Rewrite the assignment operator for PyTables expressions that use ``. 
Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- tuple of int, str Either the input or token or the replacement values", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\expr.py", + "ast_data": "FunctionDef name:_rewrite_assign arg:tok arguments arg Assign Return return:yes Compare" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n X = validate_data(self, X, ensure_min_samples=2, estimator='MinCovDet')\n random_state = check_random_state(self.random_state)\n n_samples, n_features = X.shape\n if (linalg.svdvals(np.dot(X.T, X)) > 1e-08).sum() != n_features:\n warnings.warn('The covariance matrix associated to your dataset is not full rank')\n raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(X, support_fraction=self.support_fraction, cov_computation_method=self._nonrobust_covariance, random_state=random_state)\n if self.assume_centered:\n raw_location = np.zeros(n_features)\n raw_covariance = self._nonrobust_covariance(X[raw_support], assume_centered=True)\n precision = linalg.pinvh(raw_covariance)\n raw_dist = np.sum(np.dot(X, precision) * X, 1)\n self.raw_location_ = raw_location\n self.raw_covariance_ = raw_covariance\n self.raw_support_ = raw_support\n self.location_ = raw_location\n self.support_ = raw_support\n self.dist_ = raw_dist\n self.correct_covariance(X)\n self.reweight_covariance(X)\n return self", + "docstring": "Fit a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\covariance\\_robust_covariance.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign If Compare Call Compare Call Call Call Assign Call If Assign Call Assign Call Assign Call Assign Call Call Assign Assign Assign Assign Assign Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "element_spec", + "source_code": "@property\ndef element_spec(self):\n if self._enable_get_next_as_optional and self._strategy.extended._in_multi_worker_mode():\n return nest.map_structure(_rebatch_as_dynamic, self._element_spec, expand_composites=False)\n return self._element_spec", + "docstring": "The type specification of an element of this dataset.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "FunctionDef name:element_spec arg:self arguments arg If BoolOp Call Return return:yes Call Return return:yes" + }, + { + "library": "scipy", + "name": "set_state", + "source_code": "@contextlib.contextmanager\ndef set_state(state):\n old_state = get_state()\n _uarray.set_state(state)\n try:\n yield\n finally:\n _uarray.set_state(old_state, True)", + "docstring": "A context manager that sets the state of the backends to one returned by :obj:. 
See Also -------- get_state Gets a state to be set by this context manager.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py", + "ast_data": "FunctionDef name:set_state arg:state arguments arg Assign Call Call Try Call" + }, + { + "library": "tensorflow", + "name": "register_read_only_resource_op", + "source_code": "def register_read_only_resource_op(op_type):\n RESOURCE_READ_OPS.add(op_type)", + "docstring": "Declares that does not update its touched resource.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps_utils.py", + "ast_data": "FunctionDef name:register_read_only_resource_op arg:op_type arguments arg Call" + }, + { + "library": "tensorflow", + "name": "get_registered_kernels_for_op", + "source_code": "def get_registered_kernels_for_op(name):\n buf = c_api.TF_GetRegisteredKernelsForOp(name)\n data = c_api.TF_GetBuffer(buf)\n kernel_list = kernel_def_pb2.KernelList()\n kernel_list.ParseFromString(compat.as_bytes(data))\n return kernel_list", + "docstring": "Returns a KernelList proto of registered kernels for a given op. Args: name: A string representing the name of the op whose kernels to retrieve.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\kernels.py", + "ast_data": "FunctionDef name:get_registered_kernels_for_op arg:name arguments arg Assign Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "isdtype", + "source_code": "def isdtype(dtype, kind, *, xp):\n if isinstance(kind, tuple):\n return any((_isdtype_single(dtype, k, xp=xp) for k in kind))\n else:\n return _isdtype_single(dtype, kind, xp=xp)", + "docstring": "Returns a boolean indicating whether a provided dtype is of type \"kind\". Included in the v2022.12 of the Array API spec.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", + "ast_data": "FunctionDef name:isdtype arg:dtype arg:kind arguments arg arg arg If Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "barh", + "source_code": "@_docstring.interpd\ndef barh(self, y, width, height=0.8, left=None, *, align='center', data=None, **kwargs):\n kwargs.setdefault('orientation', 'horizontal')\n patches = self.bar(x=left, height=height, width=width, bottom=y, align=align, data=data, **kwargs)\n return patches", + "docstring": "Make a horizontal bar plot. The bars are positioned at *y* with the given *align*\\ment. Their dimensions are given by *width* and *height*. The horizontal baseline is *left* (default 0). Many parameters can take either a single value applying to all bars or a sequence of values, one for each bar. Parameters ---------- y : float or array-like The y coordinates of the bars. See also *align* for the alignment of the bars to the coordinates. Bars are often used for categorical data, i.e. string labels below the bars. You can provide a list of strings directly to *y*. 
`.BarContainercolorcolorcolorcolor.BarContainer/gallery/statistics/errorbar_featurescolorcolorerrorbar.capsize~.Axes.errorbar.Rectangle/gallery/lines_bars_and_markers/horizontal_barchart_distribution`.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:barh arg:self arg:y arg:width arg:height arg:left arguments arg arg arg arg arg arg arg arg Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "set_head_dim_values", + "source_code": "def set_head_dim_values(kernel_options: dict[str, Any], qk_head_dim, v_head_dim, graph_sizevars):\n qk_head_dim_static = graph_sizevars.evaluate_static_shape(qk_head_dim)\n kernel_options.setdefault('QK_HEAD_DIM', qk_head_dim_static)\n kernel_options.setdefault('QK_HEAD_DIM_ROUNDED', next_power_of_two(qk_head_dim_static))\n v_head_dim_static = graph_sizevars.evaluate_static_shape(v_head_dim)\n kernel_options.setdefault('V_HEAD_DIM', v_head_dim_static)\n kernel_options.setdefault('V_HEAD_DIM_ROUNDED', next_power_of_two(v_head_dim_static))\n kernel_options.setdefault('SAFE_HEAD_DIM', is_power_of_2(qk_head_dim_static) and is_power_of_2(v_head_dim_static))", + "docstring": "Mutates kernel options, adding head dimension calculations. Args: kernel_options: Dictionary to populate with options qk_head_dim: Query/Key head dimension v_head_dim: Value head dimension graph_sizevars: Graph size variables object with evaluate_static_shape method", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py", + "ast_data": "FunctionDef name:set_head_dim_values arg:kernel_options arg:qk_head_dim arg:v_head_dim arg:graph_sizevars arguments arg arg arg arg Assign Call Call Call Call Assign Call Call Call Call Call BoolOp Call Call" + }, + { + "library": "tensorflow", + "name": "_TensorArrayReadGrad", + "source_code": "@ops.RegisterGradient('TensorArrayRead')\n@ops.RegisterGradient('TensorArrayReadV2')\n@ops.RegisterGradient('TensorArrayReadV3')\ndef _TensorArrayReadGrad(op: ops.Operation, grad):\n handle = op.inputs[0]\n index = op.inputs[1]\n flow = op.inputs[2]\n dtype = op.get_attr('dtype')\n grad_source = _GetGradSource(grad)\n g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n w_g = g.write(index, grad)\n return [None, None, w_g.flow]", + "docstring": "Gradient for TensorArrayRead. Args: op: Forward TensorArrayRead op. grad: Gradient to TensorArrayRead. 
Returns: A flow , which can be used in control dependencies to force the write of to the gradient .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_grad.py", + "ast_data": "FunctionDef name:_TensorArrayReadGrad arg:op arg:grad arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "getDependencies", + "source_code": "def getDependencies(filename):\n external_pat = re.compile('^\\\\s*EXTERNAL\\\\s', re.I)\n routines = []\n with open(filename) as fo:\n for lineno, line in fortranSourceLines(fo):\n m = external_pat.match(line)\n if m:\n names = line[m.end():].strip().split(',')\n names = [n.strip().lower() for n in names]\n names = [n for n in names if n]\n routines.extend(names)\n return routines", + "docstring": "For a Fortran source file, return a list of routines declared as EXTERNAL in it.", + "type": "function", + "file_path": "numpy\\numpy\\linalg\\lapack_lite\\fortran.py", + "ast_data": "FunctionDef name:getDependencies arg:filename arguments arg Assign Call Assign With Call For Call Assign Call If Assign Call Call Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "apply_non_transform_class", + "source_code": "def apply_non_transform_class(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n return input", + "docstring": "Process class tags corresponding to the inputs that are no transformation applied.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py", + "ast_data": "FunctionDef name:apply_non_transform_class arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_get_first_singular_vectors_svd", + "source_code": "def _get_first_singular_vectors_svd(X, y):\n C = np.dot(X.T, y)\n U, _, Vt = svd(C, full_matrices=False)\n return (U[:, 0], Vt[0, :])", + "docstring": "Return the first left and right singular vectors of X'y. Here the whole SVD is computed.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py", + "ast_data": "FunctionDef name:_get_first_singular_vectors_svd arg:X arg:y arguments arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "lorem", + "source_code": "@register.tag\ndef lorem(parser, token):\n bits = list(token.split_contents())\n tagname = bits[0]\n common = bits[-1] != 'random'\n if not common:\n bits.pop()\n if bits[-1] in ('w', 'p', 'b'):\n method = bits.pop()\n else:\n method = 'b'\n if len(bits) > 1:\n count = bits.pop()\n else:\n count = '1'\n count = parser.compile_filter(count)\n if len(bits) != 1:\n raise TemplateSyntaxError('Incorrect format for %r tag' % tagname)\n return LoremNode(count, method, common)", + "docstring": "Create random Latin text useful for providing test data in templates. 
Usage format:: {% lorem [count] [method] [random] %} ```` outputs two random latin words", + "type": "function", + "file_path": "django\\django\\template\\defaulttags.py", + "ast_data": "FunctionDef name:lorem arg:parser arg:token arguments arg arg Assign Call Call Assign Assign Compare If Call If Compare Assign Call Assign If Compare Call Assign Call Assign Assign Call If Compare Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "map_to_output_names", + "source_code": "def map_to_output_names(y_pred, output_names, struct):\n single_output = not nest.is_nested(y_pred)\n outputs_are_flat_list = not single_output and isinstance(y_pred, (list, tuple)) and (not any((nest.is_nested(y_p) for y_p in y_pred)))\n if (single_output or outputs_are_flat_list) and isinstance(struct, dict):\n output_names = output_names or create_pseudo_output_names(y_pred)\n struct = copy.copy(struct)\n new_struct = [struct.pop(name, None) for name in output_names]\n if struct:\n raise ValueError('Found unexpected keys that do not correspond to any Model output: {}. Expected: {}'.format(struct.keys(), output_names))\n if len(new_struct) == 1:\n return new_struct[0]\n return new_struct\n else:\n return struct", + "docstring": "Maps a dict to a list using as keys. This is a convenience feature only. When a 's outputs are a list, you can specify per-output losses and metrics as a dict, where the keys are the output names. If you specify per-output losses and metrics via the same structure as the 's outputs (recommended), no mapping is performed. For the Functional API, the output names are the names of the last layer of each output. For the Subclass API, the output names are determined by (For example: for a list of outputs). This mapping preserves backwards compatibility for and . Args: y_pred: Sample outputs of the Model, to determine if this convenience feature should be applied ( is returned unmodified if isn't a flat list). output_names: List. The names of the outputs of the Model. struct: The structure to map. Returns: mapped to a list in same order as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "FunctionDef name:map_to_output_names arg:y_pred arg:output_names arg:struct arguments arg arg arg Assign Call Assign BoolOp Call Call Call If BoolOp BoolOp Call Assign BoolOp Call Assign Call Assign Call If Raise Call Call Call If Compare Call Return return:yes Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "AutoFieldMeta", + "source_code": "class AutoFieldMeta(type):\n\n @property\n def _subclasses(self):\n return (BigAutoField, SmallAutoField)\n\n def __instancecheck__(self, instance):\n return isinstance(instance, self._subclasses) or super().__instancecheck__(instance)\n\n def __subclasscheck__(self, subclass):\n return issubclass(subclass, self._subclasses) or super().__subclasscheck__(subclass)", + "docstring": "Metaclass to maintain backward inheritance compatibility for AutoField. It is intended that AutoFieldMixin become public API when it is possible to create a non-integer automatically-generated field using column defaults stored in the database. In many areas Django also relies on using isinstance() to check for an automatically-generated field as a subclass of AutoField. A new flag needs to be implemented on Field to be used instead. 
When these issues have been addressed, this metaclass could be used to deprecate inheritance from AutoField and use of isinstance() with AutoField for detecting automatically-generated fields.", + "type": "class", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "ClassDef name:AutoFieldMeta FunctionDef name:_subclasses arg:self arguments arg Return return:yes FunctionDef name:__instancecheck__ arg:self arg:instance arguments arg arg Return return:yes BoolOp Call Call Call FunctionDef name:__subclasscheck__ arg:self arg:subclass arguments arg arg Return return:yes BoolOp Call Call Call" + }, + { + "library": "scipy", + "name": "freqresp", + "source_code": "def freqresp(system, w=None, n=10000):\n if isinstance(system, lti):\n if isinstance(system, TransferFunction | ZerosPolesGain):\n sys = system\n else:\n sys = system._as_zpk()\n elif isinstance(system, dlti):\n raise AttributeError('freqresp can only be used with continuous-time systems.')\n else:\n sys = lti(*system)._as_zpk()\n if sys.inputs != 1 or sys.outputs != 1:\n raise ValueError('freqresp() requires a SISO (single input, single output) system.')\n if w is not None:\n worN = w\n else:\n worN = n\n if isinstance(sys, TransferFunction):\n w, h = freqs(sys.num.ravel(), sys.den, worN=worN)\n elif isinstance(sys, ZerosPolesGain):\n w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)\n return (w, h)", + "docstring": "Calculate the frequency response of a continuous-time system. Parameters ---------- system : an instance of the class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of ) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) w : array_like, optional Array of frequencies (in rad/s). Magnitude and phase data is calculated for every value in this array. If not given, a reasonable set will be calculated. n : int, optional Number of frequency points to compute if is not given. The frequencies are logarithmically spaced in an interval chosen to include the influence of the poles and zeros of the system. 
Returns ------- w : 1D ndarray Frequency array [rad/s] H : 1D ndarray Array of complex magnitude values Notes ----- If (num, den) is passed in for `H(s) = \\frac{5}{(s-1)^3}`: >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5]) >>> w, H = signal.freqresp(s1) >>> plt.figure() >>> plt.plot(H.real, H.imag, \"b\") >>> plt.plot(H.real, -H.imag, \"r\") >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:freqresp arg:system arg:w arg:n arguments arg arg arg If Call If Call Assign Assign Call If Call Raise Call Assign Call Call If BoolOp Compare Compare Raise Call If Compare Assign Assign If Call Assign Call Call If Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "no_append_slash", + "source_code": "def no_append_slash(view_func):\n if iscoroutinefunction(view_func):\n\n async def _view_wrapper(request, *args, **kwargs):\n return await view_func(request, *args, **kwargs)\n else:\n\n def _view_wrapper(request, *args, **kwargs):\n return view_func(request, *args, **kwargs)\n _view_wrapper.should_append_slash = False\n return wraps(view_func)(_view_wrapper)", + "docstring": "Mark a view function as excluded from CommonMiddleware's APPEND_SLASH redirection.", + "type": "function", + "file_path": "django\\django\\views\\decorators\\common.py", + "ast_data": "FunctionDef name:no_append_slash arg:view_func arguments arg If Call AsyncFunctionDef name:_view_wrapper arg:request arguments arg arg arg Return return:yes Call FunctionDef name:_view_wrapper arg:request arguments arg arg arg Return return:yes Call Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "Iterable", + "source_code": "class Iterable(object):\n\n def __iter__(self):\n pass\n\n def reduce(self, initial_state, reduce_func):\n pass", + "docstring": "Interface for distributed objects that admit iteration/reduction.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py", + "ast_data": "ClassDef name:Iterable FunctionDef name:__iter__ arg:self arguments arg FunctionDef name:reduce arg:self arg:initial_state arg:reduce_func arguments arg arg arg" + }, + { + "library": "sphinx", + "name": "MathReferenceTransform", + "source_code": "class MathReferenceTransform(SphinxPostTransform):\n default_priority = 5\n formats = ('latex',)\n\n def run(self, **kwargs: Any) -> None:\n equations = self.env.domains.math_domain.data['objects']\n for node in self.document.findall(addnodes.pending_xref):\n if node['refdomain'] == 'math' and node['reftype'] in {'eq', 'numref'}:\n docname, _ = equations.get(node['reftarget'], (None, None))\n if docname:\n refnode = math_reference('', docname=docname, target=node['reftarget'])\n node.replace_self(refnode)", + "docstring": "Replace pending_xref nodes for math by math_reference. 
To handle math reference easily on LaTeX writer, this converts pending_xref nodes to math_reference.", + "type": "class", + "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py", + "ast_data": "ClassDef name:MathReferenceTransform Assign Assign FunctionDef name:run arg:self arguments arg arg Assign For Call If BoolOp Compare Compare Assign Call If Assign Call Call" + }, + { + "library": "pytorch", + "name": "create_python_bindings_sharded", + "source_code": "def create_python_bindings_sharded(fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], module: str | None, filename: str, *, method: bool, num_shards: int, symint: bool=True) -> None:\n grouped = group_filter_overloads(pairs, pred)\n\n def key_func(kv: tuple[BaseOperatorName, list[PythonSignatureNativeFunctionPair]]) -> str:\n return kv[0].base\n\n def env_func(kv: tuple[BaseOperatorName, list[PythonSignatureNativeFunctionPair]]) -> dict[str, list[str]]:\n name, fn_pairs = kv\n return {'ops_headers': [f'#include '], 'py_forwards': list(forward_decls(name, fn_pairs, method=method)), 'py_methods': [method_impl(name, module, fn_pairs, method=method, symint=symint)], 'py_method_defs': [method_def(name, module, fn_pairs, method=method)]}\n fm.write_sharded(filename, grouped.items(), base_env={'generated_comment': '@' + f'generated from {fm.template_dir_for_comments()}/{filename}'}, key_fn=key_func, env_callable=env_func, num_shards=num_shards, sharded_keys={'ops_headers', 'py_forwards', 'py_methods', 'py_method_defs'})", + "docstring": "Generates Python bindings to ATen functions", + "type": "function", + "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py", + "ast_data": "FunctionDef name:create_python_bindings_sharded arg:fm arg:pairs arg:pred arg:module arg:filename arguments arg arg arg arg arg arg arg arg Assign Call FunctionDef name:key_func arg:kv arguments arg Return return:yes FunctionDef name:env_func arg:kv arguments arg Assign Return return:yes Call Call Call Call Call Call Call" + }, + { + "library": "sphinx", + "name": "request", + "source_code": "def request(self, method: str, url: str, _user_agent: str='', _tls_info: tuple[bool, str | dict[str, str] | None]=(), **kwargs: Any) -> requests.Response:\n headers = kwargs.setdefault('headers', {})\n headers.setdefault('User-Agent', _user_agent or _USER_AGENT)\n if _tls_info:\n tls_verify, tls_cacerts = _tls_info\n verify = bool(kwargs.get('verify', tls_verify))\n kwargs.setdefault('verify', verify and _get_tls_cacert(url, tls_cacerts))\n else:\n verify = kwargs.get('verify', True)\n if verify:\n return super().request(method, url, **kwargs)\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=InsecureRequestWarning)\n return super().request(method, url, **kwargs)", + "docstring": "Sends a request with an HTTP verb and url. 
This sets up User-Agent header and TLS verification automatically.", + "type": "method", + "file_path": "sphinx\\sphinx\\util\\requests.py", + "ast_data": "FunctionDef name:request arg:self arg:method arg:url arg:_user_agent arg:_tls_info arguments arg arg arg arg arg arg Assign Call Call BoolOp If Assign Assign Call Call Call BoolOp Call Assign Call If Return return:yes Call Call With Call Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "KeyNetDetector", + "source_code": "class KeyNetDetector(MultiResolutionDetector):\n\n def __init__(self, pretrained: bool=False, num_features: int=2048, keynet_conf: KeyNet_conf=keynet_default_config, ori_module: Optional[Module]=None, aff_module: Optional[Module]=None) -> None:\n model = KeyNet(pretrained, keynet_conf)\n super().__init__(model, num_features, keynet_conf['Detector_conf'], ori_module, aff_module)", + "docstring": "Multi-scale feature detector based on KeyNet. This is based on the original code from paper \"Key.Net: Keypoint Detection by Handcrafted and Learned CNN Filters\". See :cite: for more details. .. image:: _static/img/keynet.jpg Args: pretrained: Download and set pretrained weights to the model. num_features: Number of features to detect. keynet_conf: Dict with initialization parameters. Do not pass it, unless you know what you are doing~kornia.feature.PassLAF~kornia.feature.LAFOrienter~kornia.feature.PassLAF~kornia.feature.LAFAffineShapeEstimator` for details.", + "type": "class", + "file_path": "kornia\\kornia\\feature\\keynet.py", + "ast_data": "ClassDef name:KeyNetDetector FunctionDef name:__init__ arg:self arg:pretrained arg:num_features arg:keynet_conf arg:ori_module arg:aff_module arguments arg arg arg arg arg arg Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "to_dot", + "source_code": "def to_dot(self) -> str:\n edges = '\\n'.join((f'\"{f}\" -> \"{t}\";' for f, t in self.edges))\n return f'digraph G {{\\nrankdir = LR;\\nnode [shape=box];\\n{edges}\\n}}\\n'", + "docstring": "Returns the dot representation of the graph. Returns: A dot representation of the graph.", + "type": "method", + "file_path": "pytorch\\torch\\package\\_digraph.py", + "ast_data": "FunctionDef name:to_dot arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "start_itemproc", + "source_code": "def start_itemproc(self, item: Any, *, response: Response | None) -> Deferred[None]:\n return deferred_from_coro(self.start_itemproc_async(item, response=response))", + "docstring": "Send *item* to the item pipelines for processing. *response* is the source of the item data. If the item does not come from response data, e.g. it was hard-coded, set it to ``.", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\scraper.py", + "ast_data": "FunctionDef name:start_itemproc arg:self arg:item arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_get_device_dict_and_cores", + "source_code": "@staticmethod\ndef _get_device_dict_and_cores(devices):\n device_map = collections.defaultdict(list)\n num_cores = 0\n for device in devices:\n match = _TPU_DEVICE_REGEX.match(device.name)\n if match:\n host_id = match.group('host_id')\n core_id = match.group('core_id')\n device_map[host_id].append(core_id)\n num_cores += 1\n return DeviceDetails(device_map, num_cores)", + "docstring": "Returns a dict of hosts to cores and total cores given devices names. 
Returns a namedtuple with two attributes: device_map: A map of host_ids to a list of core_ids. total_cores: The total number of cores within the TPU system. Args: devices: A list of devices returned by session.list_devices()", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py", + "ast_data": "FunctionDef name:_get_device_dict_and_cores arg:devices arguments arg Assign Call Assign For Assign Call If Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "is_categorical_column_weighted", + "source_code": "def is_categorical_column_weighted(self):\n if isinstance(self.categorical_column, (fc._WeightedCategoricalColumn, fc_lib.WeightedCategoricalColumn)):\n return True\n return False", + "docstring": "Check if the categorical column of the embedding column is weighted.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py", + "ast_data": "FunctionDef name:is_categorical_column_weighted arg:self arguments arg If Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "UninitializedBuffer", + "source_code": "class UninitializedBuffer(UninitializedTensorMixin, torch.Tensor):\n cls_to_become = torch.Tensor\n\n def __new__(cls, requires_grad=False, device=None, dtype=None, persistent=True) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n data = torch.empty(0, **factory_kwargs)\n ret = torch.Tensor._make_subclass(cls, data, requires_grad)\n ret.persistent = persistent\n ret._is_buffer = True\n return ret", + "docstring": "A buffer that is not initialized. Uninitialized Buffer is a a special case of :class: where the shape of the data is still unknown. Unlike a :class:, uninitialized parameters hold no data and attempting to access some properties, like their shape, will throw a runtime error. The only operations that can be performed on a uninitialized parameter are changing its datatype, moving it to a different device and converting it to a regular :class:. The default device or dtype to use when the buffer is materialized can be set during construction using e.g. ``.", + "type": "class", + "file_path": "pytorch\\torch\\nn\\parameter.py", + "ast_data": "ClassDef name:UninitializedBuffer Assign FunctionDef name:__new__ arg:cls arg:requires_grad arg:device arg:dtype arg:persistent arguments arg arg arg arg arg Assign Assign Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "compile", + "source_code": "def compile(self, run_eagerly=None, steps_per_execution=None):\n if steps_per_execution is None:\n steps_per_execution = 1\n self._configure_steps_per_execution(steps_per_execution)\n if run_eagerly is None:\n run_eagerly = self.dynamic\n self._run_eagerly = run_eagerly\n self._is_compiled = True", + "docstring": "Configures the layer for . Arguments: run_eagerly: Bool. Defaults to . If , this 's logic will not be wrapped in a . Recommended to leave this as unless your cannot be run inside a . steps_per_execution: Int. Defaults to 1. The number of batches to run during each call. 
Running multiple batches inside a single call can greatly improve performance on TPUs or small models with a large Python overhead.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py", + "ast_data": "FunctionDef name:compile arg:self arg:run_eagerly arg:steps_per_execution arguments arg arg arg If Compare Assign Call If Compare Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "dlrm_wrap", + "source_code": "def dlrm_wrap(X, lS_o, lS_i, device, ndevices=1):\n if ndevices == 1:\n lS_i = [S_i.to(device) for S_i in lS_i] if isinstance(lS_i, list) else lS_i.to(device)\n lS_o = [S_o.to(device) for S_o in lS_o] if isinstance(lS_o, list) else lS_o.to(device)\n return (X.to(device), lS_o, lS_i)", + "docstring": "Rewritten simpler version of found in dlrm_s_pytorch.py. This function simply moves the input tensors into the device and without the forward pass", + "type": "function", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\dlrm_utils.py", + "ast_data": "FunctionDef name:dlrm_wrap arg:X arg:lS_o arg:lS_i arg:device arg:ndevices arguments arg arg arg arg arg If Compare Assign Call Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "cryptography", + "name": "encrypt", + "source_code": "@abc.abstractmethod\ndef encrypt(self, plaintext: bytes, padding: AsymmetricPadding) -> bytes:\n pass", + "docstring": "Encrypts the given plaintext.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py", + "ast_data": "FunctionDef name:encrypt arg:self arg:plaintext arg:padding arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "distribute_datasets_from_function", + "source_code": "def distribute_datasets_from_function(self, dataset_fn, options=None):\n return super(OneDeviceStrategy, self).distribute_datasets_from_function(dataset_fn, options)", + "docstring": "Distributes instances created by calls to . will be called once for each worker in the strategy. In this case, we only have one worker and one device so is called once. The should take an instance where information about batching and input replication can be accessed: IMPORTANT: The returned by should have a per-replica batch size, unlike , which uses the global batch size. This may be computed using . Args: dataset_fn: A function taking a instance and returning a . options: used to control options on how this dataset is distributed. Returns: A \"distributed \", which the caller can iterate over like regular datasets.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py", + "ast_data": "FunctionDef name:distribute_datasets_from_function arg:self arg:dataset_fn arg:options arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "determine_observer_insert_points", + "source_code": "def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> dict[str, dict[str, Any]]:\n obs_ctr = ModelReportObserver\n obs_fqn_to_info: dict[str, dict[str, Any]] = {}\n for fqn, module in prepared_fx_model.named_modules():\n if self._is_supported(module, insert=True):\n targeted_node = self._get_targeting_node(prepared_fx_model, fqn)\n pre_obs_fqn = fqn + '.' 
+ self.DEFAULT_PRE_OBSERVER_NAME\n obs_fqn_to_info[pre_obs_fqn] = {DETECTOR_TARGET_NODE_KEY: targeted_node, DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(ch_axis=self.ch_axis), DETECTOR_IS_POST_OBS_KEY: False, DETECTOR_OBS_ARGS_KEY: targeted_node.args}\n return obs_fqn_to_info", + "docstring": "Determines where observers need to be inserted for the Input Weight Equalization Detector. For this detector, we want to place observers in front of supported layers. Currently inserts observers for: linear layers conv layers Args: prepared_fx_model (GraphModule): The prepared Fx GraphModule Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with: key \"target_node\" -> the node we are trying to observe with this observer (torch.fx.node.Node) key \"observer_to_insert\" -> the observer we wish to insert (ObserverBase) key \"is_post_observer\" -> True if this is meant to be a post-observer for target_node, False if pre-observer key \"observer_args\" -> The arguments that are meant to be passed into the observer", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:determine_observer_insert_points arg:self arg:prepared_fx_model arguments arg arg Assign For Call If Call Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_random", + "source_code": "def _random(self, n: IntNumber=1, *, workers: IntNumber=1) -> np.ndarray:\n sample: np.ndarray = np.empty((n, self.d), dtype=np.float64)\n if n == 0:\n return sample\n total_n = self.num_generated + n\n if total_n > self.maxn:\n msg = f'At most 2**{self.bits}={self.maxn} distinct points can be generated. {self.num_generated} points have been previously generated, then: n={self.num_generated}+{n}={total_n}. '\n if self.bits != 64:\n msg += 'Consider increasing `bits`.'\n raise ValueError(msg)\n if self.num_generated == 0:\n if not n & n - 1 == 0:\n warnings.warn(\"The balance properties of Sobol' points require n to be a power of 2.\", stacklevel=3)\n if n == 1:\n sample = self._first_point\n else:\n _draw(n=n - 1, num_gen=self.num_generated, dim=self.d, scale=self._scale, sv=self._sv, quasi=self._quasi, sample=sample)\n sample = np.concatenate([self._first_point, sample])[:n]\n else:\n _draw(n=n, num_gen=self.num_generated - 1, dim=self.d, scale=self._scale, sv=self._sv, quasi=self._quasi, sample=sample)\n return sample", + "docstring": "Draw next point(s) in the Sobol' sequence. Parameters ---------- n : int, optional Number of samples to generate in the parameter space. Default is 1. 
Returns ------- sample : array_like (n, d) Sobol' sample.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:_random arg:self arg:n arguments arg arg arg Call If Compare Return return:yes Assign If Compare Assign If Compare Raise Call If Compare If Compare Call If Compare Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_pad_strides", + "source_code": "@staticmethod\ndef _pad_strides(in_strides, size, dtype):\n align = get_align_for_dtype(dtype)\n if len(in_strides) == 0:\n return in_strides\n if not config.pad_channels_last and Layout.is_channels_last_contiguous(size, in_strides):\n return in_strides\n current_fx_node = V.get_current_node()\n if hasattr(current_fx_node, 'meta') and current_fx_node.meta.get('dislike_padding', False):\n return in_strides\n if not all((isinstance(s, (int, sympy.Integer)) for s in itertools.chain(in_strides, size))):\n return in_strides\n stride_order = get_stride_order(in_strides)\n fill_order = stride_order2fill_order(stride_order)\n new_strides = [0 for _ in range(len(in_strides))]\n new_strides[fill_order[0]] = 1\n padded = False\n for rank, idx in enumerate(fill_order[1:], start=1):\n prev_idx = fill_order[rank - 1]\n stride = new_strides[prev_idx] * size[prev_idx]\n if stride > config.padding_stride_threshold and stride % align != 0:\n stride = ceildiv(stride, align) * align\n padded = True\n new_strides[idx] = stride\n if not padded:\n return in_strides\n metrics.num_comprehensive_padding += 1\n return new_strides", + "docstring": "The padding does not change stride order but makes sure all strides larger than the threshold are multiple of align.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:_pad_strides arg:in_strides arg:size arg:dtype arguments arg arg arg Assign Call If Compare Call Return return:yes If BoolOp Call Return return:yes Assign Call If BoolOp Call Call Return return:yes If Call Call Call Return return:yes Assign Call Assign Call Assign Call Call Assign Assign For Call Assign Assign If BoolOp Compare Compare Assign Call Assign Assign If Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "rename", + "source_code": "def rename(self, index: Renamer | Hashable | None=None, *, axis: Axis | None=None, copy: bool | lib.NoDefault=lib.no_default, inplace: bool=False, level: Level | None=None, errors: IgnoreRaise='ignore') -> Series | None:\n self._check_copy_deprecation(copy)\n if axis is not None:\n axis = self._get_axis_number(axis)\n if callable(index) or is_dict_like(index):\n return super()._rename(index, inplace=inplace, level=level, errors=errors)\n else:\n return self._set_name(index, inplace=inplace)", + "docstring": "Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change `user guide copyCopy-on-Write copycopycopyKeyErrordict-like mapperindex` and index is not a dict or callable else None. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. 
Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename(\"my_name\") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x**2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:rename arg:self arg:index arguments arg arg arg arg arg arg arg Call If Compare Assign Call If BoolOp Call Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_SegmentMeanGrad", + "source_code": "@ops.RegisterGradient('SegmentMean')\ndef _SegmentMeanGrad(op: ops.Operation, grad):\n data_rank = array_ops.rank(op.inputs[0])\n segment_ids_shape = array_ops.shape(op.inputs[1])\n remaining_shape = array_ops.ones(array_ops.expand_dims(data_rank - 1, 0), dtype=segment_ids_shape.dtype)\n ones_shape = array_ops.concat([segment_ids_shape, remaining_shape], 0)\n ones = array_ops.ones(ones_shape, dtype=grad.dtype)\n scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1]))\n return (array_ops.gather(scaled_grad, op.inputs[1]), None)", + "docstring": "Gradient for SegmentMean.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_SegmentMeanGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_isna_array", + "source_code": "def _isna_array(values: ArrayLike) -> npt.NDArray[np.bool_] | NDFrame:\n dtype = values.dtype\n result: npt.NDArray[np.bool_] | NDFrame\n if not isinstance(values, np.ndarray):\n result = values.isna()\n elif isinstance(values, np.rec.recarray):\n result = _isna_recarray_dtype(values)\n elif is_string_or_object_np_dtype(values.dtype):\n result = _isna_string_dtype(values)\n elif dtype.kind in 'mM':\n result = values.view('i8') == iNaT\n else:\n result = np.isnan(values)\n return result", + "docstring": "Return an array indicating which values of the input array are NaN / NA. Parameters ---------- obj: ndarray or ExtensionArray The input array whose elements are to be checked. Returns ------- array-like Array of boolean values denoting the NA status of each element.", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\missing.py", + "ast_data": "FunctionDef name:_isna_array arg:values arguments arg Assign If Call Assign Call If Call Assign Call If Call Assign Call If Compare Assign Compare Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "_convert_to_protection", + "source_code": "@classmethod\ndef _convert_to_protection(cls, protection_dict):\n from openpyxl.styles import Protection\n return Protection(**protection_dict)", + "docstring": "Convert `` to an openpyxl v2 Protection object. Parameters ---------- protection_dict : dict A dict with zero or more of the following keys. 
'locked' 'hidden' Returns -------", + "type": "method", + "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py", + "ast_data": "FunctionDef name:_convert_to_protection arg:cls arg:protection_dict arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_shard_state_dict", + "source_code": "def _shard_state_dict(state_dict: dict[str, torch.Tensor], placement_strategies: dict[Node, PlacementStrategy], graph_signature: ExportGraphSignature, mesh: DeviceMesh) -> None:\n for node, placement_strategy in placement_strategies.items():\n if node.op != 'placeholder':\n continue\n if node.name in graph_signature.inputs_to_parameters:\n fqn = graph_signature.inputs_to_parameters[node.name]\n elif node.name in graph_signature.inputs_to_buffers:\n fqn = graph_signature.inputs_to_buffers[node.name]\n else:\n continue\n assert fqn in state_dict, f'{fqn} not found in state dict: {state_dict.keys()}'\n original_param = state_dict[fqn]\n dtensor_param = distribute_tensor(original_param, mesh, placement_strategy.output_spec.placements)\n local_param = dtensor_param.to_local()\n state_dict[fqn] = torch.nn.Parameter(local_param) if isinstance(original_param, torch.nn.Parameter) else local_param", + "docstring": "Inplace partition the weights based on the placement strategy", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py", + "ast_data": "FunctionDef name:_shard_state_dict arg:state_dict arg:placement_strategies arg:graph_signature arg:mesh arguments arg arg arg arg For Call If Compare If Compare Assign If Compare Assign Compare Call Assign Assign Call Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_get_ops_from_nodedefs", + "source_code": "def _get_ops_from_nodedefs(node_defs):\n ops = set()\n for node_def in node_defs:\n op_and_kernel = get_ops_from_nodedef(node_def)\n if op_and_kernel:\n ops.add(op_and_kernel)\n return ops", + "docstring": "Gets the ops and kernels needed from the list of NodeDef. If a NodeDef's op is not in the allowlist of ops without kernel and there is no kernel found for this NodeDef, then skip that NodeDef and proceed to the next one. Args: node_defs: list of NodeDef's to get op/kernel information. Returns: A set of (op_name, kernel_name) tuples.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\selective_registration_header_lib.py", + "ast_data": "FunctionDef name:_get_ops_from_nodedefs arg:node_defs arguments arg Assign Call For Assign Call If Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "while_stmt", + "source_code": "def while_stmt(test, body, get_state, set_state, symbol_names, opts):\n with func_graph.FuncGraph('tmp').as_default():\n init_test = test()\n if tensors.is_dense_tensor(init_test):\n _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts)\n return\n if not init_test:\n return\n body()\n _py_while_stmt(test, body, get_state, set_state, opts)", + "docstring": "Functional form of a while statement. The loop operates on a so-called state, which includes all symbols that are variant across loop iterations. In what follows we refer to state as either a tuple of entities that represent an actual state, or a list of arguments of the corresponding types. The inputs and outputs of the callables representing the loop blocks are not explicit - instead, these functions must use nonlocal/global for side effects. The inputs and outputs are instead controlled by the set_state/get_state functions. 
Args: test: Callable with boolean return type. The loop condition. body: Callable representing the actual loop body. get_state: Additional callable which can capture additional state (such as the values of composite symbols). This is only useful when staging the loop. set_state: Additional callable which save values captured by get_state back into the Python environment. This is only useful when staging the loop. symbol_names: Tuple containing the names of all loop variables. opts: Optional dict of extra loop parameters. Returns: Tuple containing the final state.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:while_stmt arg:test arg:body arg:get_state arg:set_state arg:symbol_names arg:opts arguments arg arg arg arg arg arg With Call Call Assign Call If Call Call Return return:no If Return return:no Call Call" + }, + { + "library": "django", + "name": "StaticFilesStorage", + "source_code": "class StaticFilesStorage(FileSystemStorage):\n\n def __init__(self, location=None, base_url=None, *args, **kwargs):\n if location is None:\n location = settings.STATIC_ROOT\n if base_url is None:\n base_url = settings.STATIC_URL\n check_settings(base_url)\n super().__init__(location, base_url, *args, **kwargs)\n if not location:\n self.base_location = None\n self.location = None\n\n def path(self, name):\n if not self.location:\n raise ImproperlyConfigured(\"You're using the staticfiles app without having set the STATIC_ROOT setting to a filesystem path.\")\n return super().path(name)", + "docstring": "Standard file system storage for static files. The defaults for ``.", + "type": "class", + "file_path": "django\\django\\contrib\\staticfiles\\storage.py", + "ast_data": "ClassDef name:StaticFilesStorage FunctionDef name:__init__ arg:self arg:location arg:base_url arguments arg arg arg arg arg If Compare Assign If Compare Assign Call Call Call If Assign Assign FunctionDef name:path arg:self arg:name arguments arg arg If Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "DistributedDatasetsFromFunctionV1", + "source_code": "class DistributedDatasetsFromFunctionV1(input_lib.DistributedDatasetsFromFunction):\n\n def _make_initializable_iterator(self, shared_name=None):\n del shared_name\n if context.executing_eagerly():\n raise ValueError('Cannot create initializable iterator in Eager mode. Please use `iter()` instead.')\n return self._get_iterator()\n\n def _make_one_shot_iterator(self):\n if not context.executing_eagerly():\n raise ValueError('Cannot create a one shot iterator. 
Please use `make_initializable_iterator()` instead.')\n return self._get_iterator()\n\n def _get_iterator(self):\n iterators = _create_iterators_per_worker(self._datasets, self._input_workers, self._options)\n cardinality = input_lib._cardinality(self._datasets[0])\n iterator = DistributedIteratorV1(self._input_workers, iterators, self._strategy, cardinality, self._enable_get_next_as_optional)\n iterator._element_spec = self._element_spec\n if context.executing_eagerly():\n context.async_wait()\n return iterator\n\n def __iter__(self):\n if ops.executing_eagerly_outside_functions() or ops.get_default_graph().building_function:\n return self._get_iterator()\n raise RuntimeError('__iter__() is only supported inside of tf.function or when eager execution is enabled.')", + "docstring": "Inputs created from dataset function.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py", + "ast_data": "ClassDef name:DistributedDatasetsFromFunctionV1 FunctionDef name:_make_initializable_iterator arg:self arg:shared_name arguments arg arg If Call Raise Call Return return:yes Call FunctionDef name:_make_one_shot_iterator arg:self arguments arg If Call Raise Call Return return:yes Call FunctionDef name:_get_iterator arg:self arguments arg Assign Call Assign Call Assign Call Assign If Call Call Return return:yes FunctionDef name:__iter__ arg:self arguments arg If BoolOp Call Call Return return:yes Call Raise Call" + }, + { + "library": "django", + "name": "get_success_url", + "source_code": "def get_success_url(self):\n if not self.success_url:\n raise ImproperlyConfigured('No URL to redirect to. Provide a success_url.')\n return str(self.success_url)", + "docstring": "Return the URL to redirect to after processing a valid form.", + "type": "method", + "file_path": "django\\django\\views\\generic\\edit.py", + "ast_data": "FunctionDef name:get_success_url arg:self arguments arg If Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "device_name", + "source_code": "@property\ndef device_name(self):\n return self._device_name", + "docstring": "Name of the device that the tensor belongs to. Returns: () device name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:device_name arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "score_variant", + "source_code": "def score_variant(self, variant1, variant2):\n if variant1 == variant2:\n return 0.0\n else:\n return 1.0", + "docstring": "Return a match score between *variant1* and *variant2*. 
An exact match returns 0.0, otherwise 1.0.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", + "ast_data": "FunctionDef name:score_variant arg:self arg:variant1 arg:variant2 arguments arg arg arg If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "indirect_indexing", + "source_code": "def indirect_indexing(self, index_var: str, size, check, wrap_neg=True):\n return sympy_index_symbol(str(index_var))", + "docstring": "Convert index variable to symbolic form.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", + "ast_data": "FunctionDef name:indirect_indexing arg:self arg:index_var arg:size arg:check arg:wrap_neg arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "flip_cutlass_layout", + "source_code": "@staticmethod\ndef flip_cutlass_layout(cutlass_layout: 'cutlass_lib.LayoutType') -> 'cutlass_lib.LayoutType':\n assert cutlass_utils.try_import_cutlass()\n import cutlass_library.library as cutlass_lib\n if cutlass_layout == cutlass_lib.LayoutType.RowMajor:\n return cutlass_lib.LayoutType.ColumnMajor\n else:\n return cutlass_lib.LayoutType.RowMajor", + "docstring": "Helper method: Flips a given cutlass layout (cutlass_lib.LayoutType) from RowMajor to ColumnMajor or vice versa", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py", + "ast_data": "FunctionDef name:flip_cutlass_layout arg:cutlass_layout arguments arg Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "getsource", + "source_code": "def getsource(object):\n return _inspect.getsource(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.getsource.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:getsource arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_compute_covariance", + "source_code": "def _compute_covariance(self):\n self.factor = self.covariance_factor()\n if not hasattr(self, '_data_cho_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1, bias=False, aweights=self.weights))\n self._data_cho_cov = linalg.cholesky(self._data_covariance, lower=True)\n self.covariance = self._data_covariance * self.factor ** 2\n self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)\n self.log_det = 2 * np.log(np.diag(self.cho_cov * np.sqrt(2 * pi))).sum()", + "docstring": "Computes the covariance matrix for each Gaussian kernel using covariance_factor().", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_kde.py", + "ast_data": "FunctionDef name:_compute_covariance arg:self arguments arg Assign Call If Call Assign Call Call Assign Call Assign Assign Call Assign Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "OpenMLError", + "source_code": "class OpenMLError(ValueError):\n pass", + "docstring": "HTTP 412 is a specific OpenML error code, indicating a generic error", + "type": "class", + "file_path": "scikit-learn\\sklearn\\datasets\\_openml.py", + "ast_data": "ClassDef name:OpenMLError" + }, + { + "library": "tensorflow", + "name": "in_load_context", + "source_code": "def in_load_context():\n return _load_context.in_load_context()", + "docstring": "Returns whether under a load context.", + "type": "function", + "file_path": 
"tensorflow\\tensorflow\\python\\distribute\\load_context.py", + "ast_data": "FunctionDef name:in_load_context arguments Return return:yes Call" + }, + { + "library": "numpy", + "name": "exists", + "source_code": "def exists(self, path):\n return DataSource.exists(self, self._fullpath(path))", + "docstring": "Test if path exists prepending Repository base URL to path. Test if exists as (and in this order): - a local file. - a remote URL that has been downloaded and stored locally in the directory. - a remote URL that has not been downloaded, but is valid and accessible. Parameters ---------- path : str or pathlib.Path Can be a local file or a remote URL. This may, but does not have to, include the with which the was initialized. Returns ------- out : bool True if exists. Notes ----- When is an URL, will return True if it's either stored locally in the directory, or is a valid remote URL. does not discriminate between the two, the file is accessible if it exists in either location.", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_datasource.py", + "ast_data": "FunctionDef name:exists arg:self arg:path arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer('empty_weight', empty_weight)", + "docstring": "Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. eos_coef: relative classification weight applied to the no-object category losses: list of all the losses to be applied. See get_loss for list of available losses.", + "type": "method", + "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:num_classes arg:matcher arg:weight_dict arg:eos_coef arg:losses arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "sess_str", + "source_code": "@property\ndef sess_str(self):\n raise NotImplementedError('sess_str')", + "docstring": "The TensorFlow process to which this session will connect.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:sess_str arg:self arguments arg Raise Call" + }, + { + "library": "pytorch", + "name": "initialize_rng_states", + "source_code": "def initialize_rng_states(num_rng: int, graphsafe_idx: int, fwd_rng_states: list[torch.Generator], bwd_rng_states: list[torch.Generator]):\n with torch.utils._python_dispatch._disable_current_modes():\n seeds = torch.randint(0, torch.iinfo(torch.int64).max, (num_rng,), device='cpu')\n fwd_rng_states.extend([torch.cuda.default_generators[graphsafe_idx].clone_state().manual_seed(int(seeds[i])) for i in range(num_rng)])\n bwd_rng_states.extend([torch.cuda.default_generators[graphsafe_idx].clone_state().manual_seed(int(seeds[i])) for i in range(num_rng)])", + "docstring": "Initialize the cudagraph safe rng states. 
Initialization of rng states should have a few properties: - the initialization for each rng state should be independent - the initialization should be deterministic - the initialization should be based off current rng state, so that independent graphs do not have equal rng behavior We defer initialization of rng states until runtime because compilation is wrapped with preserve_rng_states. Seed initialization should advance the rng states so consecutive compilations do not give equal randomness.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py", + "ast_data": "FunctionDef name:initialize_rng_states arg:num_rng arg:graphsafe_idx arg:fwd_rng_states arg:bwd_rng_states arguments arg arg arg arg With Call Assign Call Call Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "max_pooling1d", + "source_code": "def max_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n warnings.warn('`tf.layers.max_pooling1d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.MaxPooling1D` instead.')\n layer = MaxPooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n return layer.apply(inputs)", + "docstring": "Max Pooling layer for 1D inputs. Args: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py", + "ast_data": "FunctionDef name:max_pooling1d arg:inputs arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "iter_default_settings", + "source_code": "def iter_default_settings() -> Iterable[tuple[str, Any]]:\n for name in dir(default_settings):\n if name.isupper():\n yield (name, getattr(default_settings, name))", + "docstring": "Return the default settings as an iterator of (name, value) tuples", + "type": "function", + "file_path": "scrapy\\scrapy\\settings\\__init__.py", + "ast_data": "FunctionDef name:iter_default_settings arguments For Call If Call Call" + }, + { + "library": "pytorch", + "name": "get_alignments", + "source_code": "@functools.lru_cache(32)\ndef get_alignments(torch_dtype: torch.dtype) -> list[int]:\n if torch_dtype in (torch.half, torch.bfloat16):\n return [8, 4, 2, 1]\n elif torch_dtype == torch.float:\n return [4, 2, 1]\n elif torch_dtype in (torch.uint8, torch.int8, torch.float8_e4m3fn):\n return [16, 8, 4, 2]\n elif torch_dtype == torch.int32:\n return [4, 2, 1]\n else:\n raise NotImplementedError(f'unsupported torch_dtype={torch_dtype!r} for alignments')", + "docstring": "Returns all possible valid CUTLASS alignments in terms of the number of elements for a given dtype. 
CUTLASS gemm / conv SM80 APIs support 16 bytes max alignment, and 2 bytes min alignment.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cutlass_utils.py", + "ast_data": "FunctionDef name:get_alignments arg:torch_dtype arguments arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call Call" + }, + { + "library": "matplotlib", + "name": "make_layoutgrids_gs", + "source_code": "def make_layoutgrids_gs(layoutgrids, gs):\n if gs in layoutgrids or gs.figure is None:\n return layoutgrids\n layoutgrids['hasgrids'] = True\n if not hasattr(gs, '_subplot_spec'):\n parent = layoutgrids[gs.figure]\n layoutgrids[gs] = mlayoutgrid.LayoutGrid(parent=parent, parent_inner=True, name='gridspec', ncols=gs._ncols, nrows=gs._nrows, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios())\n else:\n subplot_spec = gs._subplot_spec\n parentgs = subplot_spec.get_gridspec()\n if parentgs not in layoutgrids:\n layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs)\n subspeclb = layoutgrids[parentgs]\n rep = (gs, 'top')\n if rep not in layoutgrids:\n layoutgrids[rep] = mlayoutgrid.LayoutGrid(parent=subspeclb, name='top', nrows=1, ncols=1, parent_pos=(subplot_spec.rowspan, subplot_spec.colspan))\n layoutgrids[gs] = mlayoutgrid.LayoutGrid(parent=layoutgrids[rep], name='gridspec', nrows=gs._nrows, ncols=gs._ncols, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios())\n return layoutgrids", + "docstring": "Make the layoutgrid for a gridspec (and anything nested in the gridspec)", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_constrained_layout.py", + "ast_data": "FunctionDef name:make_layoutgrids_gs arg:layoutgrids arg:gs arguments arg arg If BoolOp Compare Compare Return return:yes Assign If Call Assign Assign Call Call Call Assign Assign Call If Compare Assign Call Assign Assign If Compare Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_platform", + "source_code": "def get_platform():\n global PLATFORM\n cmd = 'uname'\n out, err = run_shell_cmd(cmd)\n platform_detected = out.strip().lower()\n if platform_detected != 'linux':\n if err and FLAGS.debug:\n print('Error in detecting platform:\\n %s' % str(err))\n print('Error: Detected unsupported operating system.\\nStopping...')\n sys.exit(1)\n else:\n PLATFORM = platform_detected\n return PLATFORM", + "docstring": "Retrieves platform information. Currently the script only support linux. If other platoforms such as Windows or MacOS is detected, it throws an error and terminates. Returns: String that is platform type. e.g. 'linux'", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py", + "ast_data": "FunctionDef name:get_platform arguments Assign Assign Call Assign Call Call If Compare If BoolOp Call Call Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_chief", + "source_code": "@property\ndef is_chief(self):\n return self._is_chief", + "docstring": "Return True if this is a chief supervisor. 
Returns: A bool.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "FunctionDef name:is_chief arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_verticalalignment", + "source_code": "def set_verticalalignment(self, align):\n _api.check_in_list(['top', 'bottom', 'center', 'baseline', 'center_baseline'], align=align)\n self._verticalalignment = align\n self.stale = True", + "docstring": "Set the vertical alignment relative to the anchor point. See also :doc:. Parameters ---------- align : {'baseline', 'bottom', 'center', 'center_baseline', 'top'}", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:set_verticalalignment arg:self arg:align arguments arg arg Call Assign Assign" + }, + { + "library": "pytorch", + "name": "Default", + "source_code": "class Default:\n pass", + "docstring": "Singleton default object that will cause the ConfigFuzzer to always use the default value set in the config.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\fuzzer.py", + "ast_data": "ClassDef name:Default" + }, + { + "library": "scipy", + "name": "circulant", + "source_code": "def circulant(c):\n c = np.atleast_1d(c)\n batch_shape, N = (c.shape[:-1], c.shape[-1])\n c = c.reshape(math.prod(batch_shape), N) if batch_shape else c\n c_ext = np.concatenate((c[..., ::-1], c[..., :0:-1]), axis=-1).ravel()\n L = c.shape[-1]\n n = c_ext.strides[-1]\n if c.ndim == 1:\n A = as_strided(c_ext[L - 1:], shape=(L, L), strides=(-n, n))\n else:\n m = c.shape[0]\n A = as_strided(c_ext[L - 1:], shape=(m, L, L), strides=(n * (2 * L - 1), -n, n))\n return A.reshape(batch_shape + (N, N)).copy()", + "docstring": "Construct a circulant matrix. Parameters ---------- c : (..., N,) array_like The first column(s) of the matrix. Multidimensional arrays are treated as a batch: each slice along the last axis is the first column of an output matrix. Returns ------- A : (..., N, N) ndarray A circulant matrix whose first column is given by . For batch input, each slice of shape `` along the last dimension of the input. See Also -------- toeplitz : Toeplitz matrix hankel : Hankel matrix solve_circulant : Solve a circulant system. Notes ----- .. versionadded:: 0.8.0 Examples -------- >>> from scipy.linalg import circulant >>> circulant([1, 2, 3]) array([[1, 3, 2], [2, 1, 3], [3, 2, 1]]) >>> circulant([[1, 2, 3], [4, 5, 6]]) array([[[1, 3, 2], [2, 1, 3], [3, 2, 1]], [[4, 6, 5], [5, 4, 6], [6, 5, 4]]])", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_special_matrices.py", + "ast_data": "FunctionDef name:circulant arg:c arguments arg Assign Call Assign Assign Call Call Assign Call Call Assign Assign If Compare Assign Call Assign Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "guard_cooperative_store", + "source_code": "def guard_cooperative_store(self, name, buffer):\n idx = self.cooperative_reduction_workspace_cache.increment_store_count()\n buffer.writeline(DeferredLine(name, f'if rsplit_id == ({idx} % RSPLIT):'))\n return buffer.indent()", + "docstring": "For cooperative reductions only one thread block should write out the result. 
We rotate which thread block does each write for better parallelism", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", + "ast_data": "FunctionDef name:guard_cooperative_store arg:self arg:name arg:buffer arguments arg arg arg Assign Call Call Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "start", + "source_code": "def start(self):\n if self.thread is None:\n self.mtimes = {}\n Monitor.start(self)", + "docstring": "Start our own background task thread for self.run.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\plugins.py", + "ast_data": "FunctionDef name:start arg:self arguments arg If Compare Assign Call" + }, + { + "library": "pytorch", + "name": "unshard", + "source_code": "def unshard(self, async_op: bool=False) -> Optional[UnshardHandle]:\n state = self._get_fsdp_state()\n fsdp_param_group = state._fsdp_param_group\n if fsdp_param_group is not None:\n fsdp_param_group.lazy_init()\n fsdp_param_group.unshard(async_op=async_op)\n handle = _UnshardHandleImpl(fsdp_param_group)\n if async_op:\n return handle\n handle.wait()\n return None", + "docstring": "Unshards the module's parameters by allocating memory and all-gathering the parameters. This method is *not* recursive. The unshard follows the :class:, so it will all-gather following `UnshardHandlewaitwait` explicitly if the wait should happen before pre-forward.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py", + "ast_data": "FunctionDef name:unshard arg:self arg:async_op arguments arg arg Assign Call Assign If Compare Call Call Assign Call If Return return:yes Call Return return:no" + }, + { + "library": "authlib", + "name": "generate_client_registration_info", + "source_code": "def generate_client_registration_info(self, client, request):\n return None", + "docstring": "Generate ``` by default. Developers MAY rewrite this method to return registration information.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7591\\endpoint.py", + "ast_data": "FunctionDef name:generate_client_registration_info arg:self arg:client arg:request arguments arg arg arg Return return:no" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n check_is_fitted(self)\n return self._loss.link.inverse(self._raw_predict(X).ravel())", + "docstring": "Predict values for X. Parameters ---------- X : array-like, shape (n_samples, n_features) The input samples. 
Returns ------- y : ndarray, shape (n_samples,) The predicted values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "_unicode_ci_compare", + "source_code": "def _unicode_ci_compare(s1, s2):\n return unicodedata.normalize('NFKC', s1).casefold() == unicodedata.normalize('NFKC', s2).casefold()", + "docstring": "Perform case-insensitive comparison of two identifiers, using the recommended algorithm from Unicode Technical Report 36, section 2.11.2(B)(2).", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\forms.py", + "ast_data": "FunctionDef name:_unicode_ci_compare arg:s1 arg:s2 arguments arg arg Return return:yes Compare Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "tick_bottom", + "source_code": "def tick_bottom(self):\n label = True\n if 'label1On' in self._major_tick_kw:\n label = self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']\n self.set_ticks_position('bottom')\n self.set_tick_params(which='both', labelbottom=label)", + "docstring": "Move ticks and ticklabels (if present) to the bottom of the Axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:tick_bottom arg:self arguments arg Assign If Compare Assign BoolOp Call Call" + }, + { + "library": "tensorflow", + "name": "ConcatAggregator", + "source_code": "class ConcatAggregator(Aggregator):\n\n def __init__(self, batch_size):\n self.composite = None\n super(ConcatAggregator, self).__init__(use_steps=True, num_samples=None, steps=None, batch_size=batch_size)\n\n def create(self, batch_element):\n self.composite = is_composite_or_composite_value(batch_element)\n\n def aggregate(self, batch_element, batch_start=None, batch_end=None):\n if self.batch_size and self.batch_size < batch_element.shape[0]:\n raise ValueError('Mismatch between expected batch size and model output batch size. Output shape = {}, expected output shape = shape {}'.format(batch_element.shape, (self.batch_size,) + batch_element.shape[1:]))\n self.results.append(batch_element)\n\n def finalize(self):\n if len(self.results) == 1:\n self.results = self.results[0]\n elif self.composite:\n results = self.results[0]\n for r in self.results[1:]:\n results = _append_composite_tensor(results, r)\n self.results = results\n else:\n self.results = np.concatenate(self.results, axis=0)", + "docstring": "Combine tensor-likes which cannot be merged on the fly. 
This class expects to aggregate a single tensor-like rather than a nested structure of tensor-likes.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "ClassDef name:ConcatAggregator FunctionDef name:__init__ arg:self arg:batch_size arguments arg arg Assign Call Call FunctionDef name:create arg:self arg:batch_element arguments arg arg Assign Call FunctionDef name:aggregate arg:self arg:batch_element arg:batch_start arg:batch_end arguments arg arg arg arg If BoolOp Compare Raise Call Call Call FunctionDef name:finalize arg:self arguments arg If Compare Call Assign If Assign For Assign Call Assign Assign Call" + }, + { + "library": "kornia", + "name": "to_tensorflow", + "source_code": "def to_tensorflow() -> ModuleType:\n return ivy.transpile(kornia, source='torch', target='tensorflow')", + "docstring": "Convert Kornia to TensorFlow. Transpiles the Kornia library to TensorFlow using [ivy]( The transpilation process occurs lazily, so the transpilation on a given kornia function/class will only occur when it's called or instantiated for the first time. This will make any functions/classes slow when being used for the first time, but any subsequent uses should be as fast as expected. Return: The Kornia library transpiled to TensorFlow Example: .. highlight:: python .. code-block:: python import kornia tf_kornia = kornia.to_tensorflow() import tensorflow as tf input = tf.random.normal((2, 3, 4, 5)) gray = tf_kornia.color.gray.rgb_to_grayscale(input)", + "type": "function", + "file_path": "kornia\\kornia\\transpiler\\transpiler.py", + "ast_data": "FunctionDef name:to_tensorflow arguments Return return:yes Call" + }, + { + "library": "scipy", + "name": "basis_element", + "source_code": "@classmethod\ndef basis_element(cls, t, extrapolate=True):\n k = len(t) - 2\n t = _as_float_array(t)\n t = np.r_[(t[0] - 1,) * k, t, (t[-1] + 1,) * k]\n c = np.zeros_like(t)\n c[k] = 1.0\n return cls.construct_fast(t, c, k, extrapolate)", + "docstring": "Return a B-spline basis element `tktt`, and compare to its explicit form: >>> t = [0, 1, 1, 2] >>> b = BSpline.basis_element(t) >>> def f(x): ... 
return np.where(x < 1, x**2, (2 - x)**2) >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots() >>> x = np.linspace(0, 2, 51) >>> ax.plot(x, b(x), 'g', lw=3) >>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4) >>> ax.grid(True) >>> plt.show()", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_bsplines.py", + "ast_data": "FunctionDef name:basis_element arg:cls arg:t arg:extrapolate arguments arg arg arg Assign Call Assign Call Assign Assign Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "in_progress", + "source_code": "def in_progress(self) -> bool:\n return self._level > 0", + "docstring": "True if we've entered the context.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py", + "ast_data": "FunctionDef name:in_progress arg:self arguments arg Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "_auto_legend_data", + "source_code": "def _auto_legend_data(self, renderer):\n assert self.isaxes\n bboxes = []\n lines = []\n offsets = []\n for artist in self.parent._children:\n if isinstance(artist, Line2D):\n lines.append(artist.get_transform().transform_path(artist.get_path()))\n elif isinstance(artist, Rectangle):\n bboxes.append(artist.get_bbox().transformed(artist.get_data_transform()))\n elif isinstance(artist, Patch):\n lines.append(artist.get_transform().transform_path(artist.get_path()))\n elif isinstance(artist, PolyCollection):\n lines.extend((artist.get_transform().transform_path(path) for path in artist.get_paths()))\n elif isinstance(artist, Collection):\n transform, transOffset, hoffsets, _ = artist._prepare_points()\n if len(hoffsets):\n offsets.extend(transOffset.transform(hoffsets))\n elif isinstance(artist, Text):\n bboxes.append(artist.get_window_extent(renderer))\n return (bboxes, lines, offsets)", + "docstring": "Return display coordinates for hit testing for \"best\" positioning. Returns ------- bboxes List of bounding boxes of all patches. lines List of corresponding to each line. 
offsets List of (x, y) offsets of all collection.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\legend.py", + "ast_data": "FunctionDef name:_auto_legend_data arg:self arg:renderer arguments arg arg Assign Assign Assign For If Call Call Call Call Call If Call Call Call Call Call If Call Call Call Call Call If Call Call Call Call Call If Call Assign Call If Call Call Call If Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_show_mean_and_variance", + "source_code": "def _show_mean_and_variance(tensor, cast_to_f32=True):\n if cast_to_f32:\n tensor = math_ops.cast(tensor, dtypes.float32)\n mean, var = nn_impl.moments(array_ops.reshape(tensor, [-1]), axes=[0])\n if not mean.get_shape().is_fully_defined():\n mean = array_ops.reshape(mean, [])\n if not var.get_shape().is_fully_defined():\n var = array_ops.reshape(var, [])\n return (mean, var)", + "docstring": "Returns the mean and variance of the given tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_show_mean_and_variance arg:tensor arg:cast_to_f32 arguments arg arg If Assign Call Assign Call Call If Call Call Assign Call If Call Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_process_levels", + "source_code": "def _process_levels(self):\n self._levels = list(self.levels)\n if self.logscale:\n lower, upper = (1e-250, 1e+250)\n else:\n lower, upper = (-1e+250, 1e+250)\n if self.extend in ('both', 'min'):\n self._levels.insert(0, lower)\n if self.extend in ('both', 'max'):\n self._levels.append(upper)\n self._levels = np.asarray(self._levels)\n if not self.filled:\n self.layers = self.levels\n return\n if self.logscale:\n self.layers = np.sqrt(self._levels[:-1]) * np.sqrt(self._levels[1:])\n else:\n self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])", + "docstring": "Assign values to :attr: based on :attr:, adding extended layers as needed if contours are filled. For line contours, layers simply coincide with levels; a line is a thin layer. No extended levels are needed with line contours.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\contour.py", + "ast_data": "FunctionDef name:_process_levels arg:self arguments arg Assign Call If Assign Assign If Compare Call If Compare Call Assign Call If Assign Return return:no If Assign Call Call Assign" + }, + { + "library": "pygame", + "name": "pixels3d", + "source_code": "def pixels3d(surface):\n return numpy_array(surface.get_view('3'), copy=False)", + "docstring": "pygame.surfarray.pixels3d(Surface): return array reference pixels into a 3d array Create a new 3D array that directly references the pixel values in a Surface. Any changes to the array will affect the pixels in the Surface. This is a fast operation since no data is copied. This will only work on Surfaces that have 24-bit or 32-bit formats. Lower pixel formats cannot be referenced. 
The Surface this references will remain locked for the lifetime of the array (see the Surface.lock - lock the Surface memory for pixel access method).", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:pixels3d arg:surface arguments arg Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "get_redirect_uri", + "source_code": "def get_redirect_uri(self):\n raise NotImplementedError()", + "docstring": "A method to get authorization code's ``:: def get_redirect_uri(self): return self.redirect_uri :return: A URL string", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py", + "ast_data": "FunctionDef name:get_redirect_uri arg:self arguments arg Raise Call" + }, + { + "library": "django", + "name": "get_day_format", + "source_code": "def get_day_format(self):\n return self.day_format", + "docstring": "Get a day format string in strptime syntax to be used to parse the day from url variables.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:get_day_format arg:self arguments arg Return return:yes" + }, + { + "library": "cherrypy", + "name": "file_generator_limited", + "source_code": "def file_generator_limited(fileobj, count, chunk_size=65536):\n remaining = count\n while remaining > 0:\n chunk = fileobj.read(min(chunk_size, remaining))\n chunklen = len(chunk)\n if chunklen == 0:\n return\n remaining -= chunklen\n yield chunk", + "docstring": "Yield the given file object in chunks. Stopps after bytes has been emitted. Default chunk size is 64kB. (Core)", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\__init__.py", + "ast_data": "FunctionDef name:file_generator_limited arg:fileobj arg:count arg:chunk_size arguments arg arg arg Assign While Compare Assign Call Call Assign Call If Compare Return return:no" + }, + { + "library": "pytorch", + "name": "make_simplify_with_ranges_cache", + "source_code": "def make_simplify_with_ranges_cache(self) -> Callable[[Expr, VarRanges], Expr]:\n cache: dict[tuple[Any, ...], Expr] = {}\n replacement_count = len(self.replacements)\n\n def simplify_with_ranges(expr: Expr, var_ranges: VarRanges) -> Expr:\n nonlocal replacement_count\n if replacement_count != len(self.replacements):\n cache.clear()\n replacement_count = len(self.replacements)\n key = (expr, *var_ranges.items())\n result = cache.get(key, None)\n if result is None:\n result = self._simplify_with_ranges(expr, var_ranges)\n cache[key] = result\n if result != expr:\n cache[result, *var_ranges.items()] = result\n return result\n return simplify_with_ranges", + "docstring": "self._simplify_with_ranges() can be expensive, cache its results", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\sizevars.py", + "ast_data": "FunctionDef name:make_simplify_with_ranges_cache arg:self arguments arg Assign Call FunctionDef name:simplify_with_ranges arg:expr arg:var_ranges arguments arg arg If Compare Call Call Assign Call Assign Call Assign Call If Compare Assign Call Assign If Compare Assign Call Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "get_lapack_funcs", + "source_code": "@_memoize_get_funcs\ndef get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):\n if isinstance(ilp64, str):\n if ilp64 == 'preferred':\n ilp64 = HAS_ILP64\n else:\n raise ValueError(\"Invalid value for 'ilp64'\")\n if not ilp64:\n return _get_funcs(names, arrays, dtype, 'LAPACK', _flapack, _clapack, 'flapack', 
'clapack', _lapack_alias, ilp64=False)\n else:\n if not HAS_ILP64:\n raise RuntimeError('LAPACK ILP64 routine requested, but Scipy compiled only with 32-bit BLAS')\n return _get_funcs(names, arrays, dtype, 'LAPACK', _flapack_64, None, 'flapack_64', None, _lapack_alias, ilp64=True)", + "docstring": "Return available LAPACK function objects from names. Arrays are used to determine the optimal prefix of LAPACK routines. Parameters ---------- names : str or sequence of str Name(s) of LAPACK functions without type prefix. arrays : sequence of ndarrays, optional Arrays can be given to determine optimal prefix of LAPACK routines. If not given, double-precision routines will be used, otherwise the most generic type in arrays will be used. dtype : str or dtype, optional Data-type specifier. Not used if is non-empty. ilp64 : {True, False, 'preferred'}, optional Whether to return ILP64 routine variant. Choosing 'preferred' returns ILP64 routine if available, and otherwise the 32-bit routine. Default: False Returns ------- funcs : list List containing the found function(s). Notes ----- This routine automatically chooses between Fortran/C interfaces. Fortran code is used whenever possible for arrays with column major order. In all other cases, C code is preferred. In LAPACK, the naming convention is that all functions start with a type prefix, which depends on the type of the principal matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy types {float32, float64, complex64, complex128} respectively, and are stored in attribute `` >>> a = rng.random((1000, 1000)) >>> b = rng.random((1000, 1)) * 1j >>> # We pick up zsysv and zsysv_lwork due to b array ... xsysv, xlwork = LA.get_lapack_funcs(('sysv', 'sysv_lwork'), (a, b)) >>> opt_lwork, _ = xlwork(a.shape[0]) # returns a complex for 'z' prefix >>> udut, ipiv, x, info = xsysv(a, b, lwork=int(opt_lwork.real))", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\lapack.py", + "ast_data": "FunctionDef name:get_lapack_funcs arg:names arg:arrays arg:dtype arg:ilp64 arguments arg arg arg arg If Call If Compare Assign Raise Call If Return return:yes Call If Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "Verbatim", + "source_code": "class Verbatim:\n\n def __init__(self, x):\n self._x = x\n\n def pdfRepr(self):\n return self._x", + "docstring": "Store verbatim PDF command content for later inclusion in the stream.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "ClassDef name:Verbatim FunctionDef name:__init__ arg:self arg:x arguments arg arg Assign FunctionDef name:pdfRepr arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "all_reduce", + "source_code": "@_exception_logger\ndef all_reduce(tensor, op=ReduceOp.SUM, group=None, async_op=False):\n relevant_args = (tensor,)\n if has_torch_function(relevant_args):\n return handle_torch_function(all_reduce, relevant_args, tensor, op=op, group=group, async_op=async_op)\n _check_single_tensor(tensor, 'tensor')\n if _rank_not_in_group(group):\n _warn_not_in_group('all_reduce')\n return\n if tensor.is_complex():\n if not supports_complex(op):\n raise ValueError(f'all_reduce does not support {op} on complex tensors')\n tensor = torch.view_as_real(tensor)\n opts = AllreduceOptions()\n opts.reduceOp = op\n opts.asyncOp = async_op\n if group is None:\n group = _get_default_group()\n if group in _world.pg_coalesce_state.keys():\n coll = _CollOp(all_reduce, tensor, None, op, None)\n 
_world.pg_coalesce_state[group].append(coll)\n if async_op:\n return _IllegalWork()\n else:\n return None\n work = group.allreduce([tensor], opts)\n if async_op:\n return work\n elif work is not None:\n work.wait()", + "docstring": "Reduces the tensor data across all machines in a way that all get the final result. After the call `` enum. Specifies an operation used for element-wise reductions. group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. async_op (bool, optional): Whether this op should be an async op Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group Examples: >>> # xdoctest: +SKIP(\"no rank\") >>> # All tensors below are of torch.int64 type. >>> # We have 2 process groups, 2 ranks. >>> device = torch.device(f\"cuda:{rank}\") >>> tensor = torch.arange(2, dtype=torch.int64, device=device) + 1 + 2 * rank >>> tensor tensor([1, 2], device='cuda:0') # Rank 0 tensor([3, 4], device='cuda:1') # Rank 1 >>> dist.all_reduce(tensor, op=ReduceOp.SUM) >>> tensor tensor([4, 6], device='cuda:0') # Rank 0 tensor([4, 6], device='cuda:1') # Rank 1 >>> # All tensors below are of torch.cfloat type. >>> # We have 2 process groups, 2 ranks. >>> tensor = torch.tensor( ... [1 + 1j, 2 + 2j], dtype=torch.cfloat, device=device ... ) + 2 * rank * (1 + 1j) >>> tensor tensor([1.+1.j, 2.+2.j], device='cuda:0') # Rank 0 tensor([3.+3.j, 4.+4.j], device='cuda:1') # Rank 1 >>> dist.all_reduce(tensor, op=ReduceOp.SUM) >>> tensor tensor([4.+4.j, 6.+6.j], device='cuda:0') # Rank 0 tensor([4.+4.j, 6.+6.j], device='cuda:1') # Rank 1", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:all_reduce arg:tensor arg:op arg:group arg:async_op arguments arg arg arg arg Assign If Call Return return:yes Call Call If Call Call Return return:no If Call If Call Raise Call Assign Call Assign Call Assign Assign If Compare Assign Call If Compare Call Assign Call Call If Return return:yes Call Return return:no Assign Call If Return return:yes If Compare Call" + }, + { + "library": "pytorch", + "name": "_join_rocm_home", + "source_code": "def _join_rocm_home(*paths) -> str:\n if ROCM_HOME is None:\n raise OSError('ROCM_HOME environment variable is not set. Please set it to your ROCm install root.')\n return os.path.join(ROCM_HOME, *paths)", + "docstring": "Join paths with ROCM_HOME, or raises an error if it ROCM_HOME is not set. This is basically a lazy way of raising an error for missing $ROCM_HOME only once we need to get any ROCm-specific path.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\cpp_extension.py", + "ast_data": "FunctionDef name:_join_rocm_home arguments arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "draw_tex", + "source_code": "def draw_tex(self, gc, x, y, s, prop, angle, *, mtext=None):\n self._draw_text_as_path(gc, x, y, s, prop, angle, ismath='TeX')", + "docstring": "Draw a TeX instance. Parameters ---------- gc : The graphics context. x : float The x location of the text in display coords. y : float The y location of the text baseline in display coords. s : str The TeX text string. prop : The font properties. angle : float The rotation angle in degrees anti-clockwise. 
mtext : The original text object to be rendered.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:draw_tex arg:self arg:gc arg:x arg:y arg:s arg:prop arg:angle arguments arg arg arg arg arg arg arg arg Call" + }, + { + "library": "matplotlib", + "name": "colorbar", + "source_code": "@property\ndef colorbar(self):\n return self._colorizer.colorbar", + "docstring": "The last colorbar associated with this object. May be None", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py", + "ast_data": "FunctionDef name:colorbar arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "ChoiceCaller", + "source_code": "class ChoiceCaller:\n\n def __init__(self, name: str, input_nodes: list[Buffer], layout: Layout, description: str) -> None:\n super().__init__()\n self.name = name\n self.layout = layout\n self.input_nodes = input_nodes\n self.description = description\n\n def benchmark(self, *args, out) -> float:\n algo = self.to_callable()\n return benchmarker.benchmark(algo, args, {'out': out})\n\n def call_name(self) -> str:\n raise NotImplementedError\n\n def to_callable(self):\n raise NotImplementedError\n\n def hash_key(self) -> str:\n raise NotImplementedError\n\n def output_node(self) -> TensorBox:\n raise NotImplementedError\n\n def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]:\n return {}\n\n def autoheuristic_id(self) -> str:\n return 'unsupported_choice'", + "docstring": "Represents a possible choice used in autotune_process.py. During autotuning, self.benchmark() is first called to get benchmark result, and if this choice is selected, self.output_node() is called to get the output_node. Children classes: TritonTemplateCaller, CUDATemplateCaller.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "ClassDef name:ChoiceCaller FunctionDef name:__init__ arg:self arg:name arg:input_nodes arg:layout arg:description arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:benchmark arg:self arguments arg arg arg Assign Call Return return:yes Call FunctionDef name:call_name arg:self arguments arg Raise FunctionDef name:to_callable arg:self arguments arg Raise FunctionDef name:hash_key arg:self arguments arg Raise FunctionDef name:output_node arg:self arguments arg Raise FunctionDef name:info_dict arg:self arguments arg Return return:no FunctionDef name:autoheuristic_id arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "fuzz_n_tuple", + "source_code": "def fuzz_n_tuple(self, n: int, max_combinations: int=1000) -> ResultType:\n results = ResultType()\n print(f'Starting {n}-tuple testing with seed {self.seed}')\n random.seed(self.seed)\n for combo in itertools.combinations(self.fields, n):\n st = self._fuzz_helper(results, combo)\n if st != Status.SKIPPED:\n max_combinations -= 1\n if max_combinations <= 0:\n print('Reached maximum combinations limit')\n break\n return results", + "docstring": "Test every combination of n configs. returns a dict of this shape: {(config-1, config-2... 
config-n): status}", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\fuzzer.py", + "ast_data": "FunctionDef name:fuzz_n_tuple arg:self arg:n arg:max_combinations arguments arg arg arg Assign Call Call Call For Call Assign Call If Compare If Compare Call Return return:yes" + }, + { + "library": "pandas", + "name": "_quantile", + "source_code": "def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self:\n pa_dtype = self._pa_array.type\n data = self._pa_array\n if pa.types.is_temporal(pa_dtype):\n nbits = pa_dtype.bit_width\n if nbits == 32:\n data = data.cast(pa.int32())\n else:\n data = data.cast(pa.int64())\n result = pc.quantile(data, q=qs, interpolation=interpolation)\n if pa.types.is_temporal(pa_dtype):\n if pa.types.is_floating(result.type):\n result = pc.floor(result)\n nbits = pa_dtype.bit_width\n if nbits == 32:\n result = result.cast(pa.int32())\n else:\n result = result.cast(pa.int64())\n result = result.cast(pa_dtype)\n return type(self)(result)", + "docstring": "Compute the quantiles of self for each quantile in . Parameters ---------- qs : np.ndarray[float64] interpolation: str Returns ------- same type as self", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:_quantile arg:self arg:qs arg:interpolation arguments arg arg arg Assign Assign If Call Assign If Compare Assign Call Call Assign Call Call Assign Call If Call If Call Assign Call Assign If Compare Assign Call Call Assign Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_visible_devices", + "source_code": "@tf_export('config.get_visible_devices', 'config.experimental.get_visible_devices')\n@deprecation.deprecated_endpoints('config.experimental.get_visible_devices')\ndef get_visible_devices(device_type=None):\n return context.context().get_visible_devices(device_type)", + "docstring": "Get the list of visible physical devices. Returns the list of s currently marked as visible to the runtime. A visible device will have at least one associated with it once the runtime is initialized. The following example verifies all visible GPUs have been disabled: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... # Disable all GPUS ... tf.config.set_visible_devices([], 'GPU') ... visible_devices = tf.config.get_visible_devices() ... for device in visible_devices: ... assert device.device_type != 'GPU' ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: device_type: (optional string) Only include devices matching this device type. For example \"CPU\" or \"GPU\". 
Returns: List of visible s", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:get_visible_devices arg:device_type arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "to_tensor_spec", + "source_code": "def to_tensor_spec(input_spec, default_dtype=None):\n default_dtype = default_dtype or backend.floatx()\n if isinstance(input_spec, InputSpec):\n dtype = input_spec.dtype or default_dtype\n return tensor_spec.TensorSpec(to_tensor_shape(input_spec), dtype)\n return tensor_spec.TensorSpec(None, default_dtype)", + "docstring": "Converts a Keras InputSpec object to a TensorSpec.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\input_spec.py", + "ast_data": "FunctionDef name:to_tensor_spec arg:input_spec arg:default_dtype arguments arg arg Assign BoolOp Call If Call Assign BoolOp Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "log_event_start", + "source_code": "def log_event_start(self, event_name: str, time_ns: int, metadata: dict[str, Any], log_pt2_compile_event: bool=False, compile_id: Optional[CompileId]=None) -> None:\n compile_id = compile_id or torch._guards.CompileContext.current_compile_id()\n metadata['compile_id'] = str(compile_id)\n self._log_timed_event(event_name, time_ns, 'B', metadata)\n self.get_stack().append(event_name)\n self.add_event_data(event_name, **metadata)\n if log_pt2_compile_event:\n self.get_pt2_compile_substack().append(event_name)", + "docstring": "Logs the start of a single event. :param str event_name Name of event to appear in trace :param time_ns Timestamp in nanoseconds :param metadata: Any extra metadata associated with this event :param log_pt2_compile_event: If True, log to pt2_compile_events :param compile_id: Explicit compile_id (rather than using the current context)", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:log_event_start arg:self arg:event_name arg:time_ns arg:metadata arg:log_pt2_compile_event arg:compile_id arguments arg arg arg arg arg arg Assign BoolOp Call Assign Call Call Call Call Call If Call Call" + }, + { + "library": "tensorflow", + "name": "eager_restore", + "source_code": "def eager_restore(self, trackable):\n assert context.executing_eagerly()\n for saveable in self.globally_named_object_attributes(trackable):\n restored_tensors = []\n tensor_missing = False\n for spec in saveable.specs:\n if spec.name in self.dtype_map:\n with ops.device('cpu:0'):\n restored, = io_ops.restore_v2(prefix=self.save_path, tensor_names=[spec.name], shape_and_slices=[''], dtypes=[self.dtype_map[spec.name]], name='%s_checkpoint_read' % (spec.name,))\n restored_tensors.append(array_ops.identity(restored))\n else:\n tensor_missing = True\n if tensor_missing:\n self.unused_attributes.setdefault(trackable, []).append(saveable.name)\n else:\n saveable.restore(restored_tensors=restored_tensors, restored_shapes=None)", + "docstring": "Runs restore ops for 's attributes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:eager_restore arg:self arg:trackable arguments arg arg Call For Call Assign Assign For If Compare With Call Assign Call Call Call Assign If Call Call Call" + }, + { + "library": "pytorch", + "name": "_format_time", + "source_code": "def _format_time(time_us):\n US_IN_SECOND = 1000.0 * 1000.0\n US_IN_MS = 
1000.0\n if time_us >= US_IN_SECOND:\n return f'{time_us / US_IN_SECOND:.3f}s'\n if time_us >= US_IN_MS:\n return f'{time_us / US_IN_MS:.3f}ms'\n return f'{time_us:.3f}us'", + "docstring": "Define how to format time in FunctionEvent.", + "type": "function", + "file_path": "pytorch\\torch\\autograd\\profiler_util.py", + "ast_data": "FunctionDef name:_format_time arg:time_us arguments arg Assign Assign If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "__delitem__", + "source_code": "def __delitem__(self, index):\n if not isinstance(index, (int, slice)):\n raise TypeError('%s is not a legal index' % index)\n origLen = len(self)\n if isinstance(index, int):\n index = self._checkindex(index)\n indexRange = [index]\n else:\n indexRange = range(*index.indices(origLen))\n newLen = origLen - len(indexRange)\n newItems = (self._get_single_internal(i) for i in range(origLen) if i not in indexRange)\n self._rebuild(newLen, newItems)", + "docstring": "Delete the item(s) at the specified index/slice.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py", + "ast_data": "FunctionDef name:__delitem__ arg:self arg:index arguments arg arg If Call Raise Call Assign Call If Call Assign Call Assign Assign Call Call Assign Call Assign Call Call Compare Call" + }, + { + "library": "pytorch", + "name": "get_state", + "source_code": "@abstractmethod\ndef get_state(self) -> Optional[tuple[bytes, Token]]:\n pass", + "docstring": "Get the rendezvous state. Returns: A tuple of the encoded rendezvous state and its fencing token or `` if no state is found in the backend. Raises: RendezvousConnectionError: The connection to the backend has failed. RendezvousStateError: The rendezvous state is corrupt.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py", + "ast_data": "FunctionDef name:get_state arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "AggregateProfile", + "source_code": "class AggregateProfile(object):\n\n def __init__(self, profile_datum):\n self.total_op_time = profile_datum.op_time\n self.total_exec_time = profile_datum.exec_time\n device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n self._node_to_exec_count = {device_and_node: 1}\n\n def add(self, profile_datum):\n self.total_op_time += profile_datum.op_time\n self.total_exec_time += profile_datum.exec_time\n device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n if device_and_node in self._node_to_exec_count:\n self._node_to_exec_count[device_and_node] += 1\n else:\n self._node_to_exec_count[device_and_node] = 1\n\n @property\n def node_count(self):\n return len(self._node_to_exec_count)\n\n @property\n def node_exec_count(self):\n return sum(self._node_to_exec_count.values())", + "docstring": "Profile summary data for aggregating a number of ProfileDatum.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\profiling.py", + "ast_data": "ClassDef name:AggregateProfile FunctionDef name:__init__ arg:self arg:profile_datum arguments arg arg Assign Assign Assign Assign FunctionDef name:add arg:self arg:profile_datum arguments arg arg Assign Assign If Compare Assign FunctionDef name:node_count arg:self arguments arg Return return:yes Call FunctionDef 
name:node_exec_count arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "roots", + "source_code": "@array_function_dispatch(_roots_dispatcher)\ndef roots(p):\n p = atleast_1d(p)\n if p.ndim != 1:\n raise ValueError('Input must be a rank-1 array.')\n non_zero = NX.nonzero(NX.ravel(p))[0]\n if len(non_zero) == 0:\n return NX.array([])\n trailing_zeros = len(p) - non_zero[-1] - 1\n p = p[int(non_zero[0]):int(non_zero[-1]) + 1]\n if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):\n p = p.astype(float)\n N = len(p)\n if N > 1:\n A = diag(NX.ones((N - 2,), p.dtype), -1)\n A[0, :] = -p[1:] / p[0]\n roots = eigvals(A)\n else:\n roots = NX.array([])\n roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))\n return roots", + "docstring": "Return the roots of a polynomial with coefficients given in p. .. note:: This forms part of the old polynomial API. Since version 1.4, the new polynomial API defined in is preferred. A summary of the differences can be found in the :doc:. The values in the rank-1 array are coefficients of a polynomial. If the length of is n+1 then the polynomial is described by:: p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] Parameters ---------- p : array_like Rank-1 array of polynomial coefficients. Returns ------- out : ndarray An array containing the roots of the polynomial. Raises ------ ValueError When cannot be converted to a rank-1 array. See also -------- poly : Find the coefficients of a polynomial with a given sequence of roots. polyval : Compute polynomial values. polyfit : Least squares polynomial fit. poly1d : A one-dimensional polynomial class. Notes ----- The algorithm relies on computing the eigenvalues of the companion matrix [1]_. References ---------- .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: Cambridge University Press, 1999, pp. 146-7. Examples -------- >>> import numpy as np >>> coeff = [3.2, 2, 1] >>> np.roots(coeff) array([-0.3125+0.46351241j, -0.3125-0.46351241j])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py", + "ast_data": "FunctionDef name:roots arg:p arguments arg Assign Call If Compare Raise Call Assign Call Call If Compare Call Return return:yes Call Assign Call Assign Call Call If Call Assign Call Assign Call If Compare Assign Call Call Assign Assign Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "maybe_cast_str_impl", + "source_code": "@overload(maybe_cast_str)\ndef maybe_cast_str_impl(x):\n if isinstance(x, types.UnicodeCharSeq):\n return lambda x: str(x)\n else:\n return lambda x: x", + "docstring": "Converts numba UnicodeCharSeq (numpy string scalar) -> unicode type (string). 
Is a no-op for other types.", + "type": "function", + "file_path": "pandas\\pandas\\core\\_numba\\extensions.py", + "ast_data": "FunctionDef name:maybe_cast_str_impl arg:x arguments arg If Call Return return:yes arguments arg Call Return return:yes arguments arg Call" + }, + { + "library": "pytorch", + "name": "MutationType", + "source_code": "class MutationType:\n\n def __init__(self, typ: SourceType) -> None:\n if typ is SourceType.Existing:\n self.scope = 0\n elif typ is SourceType.New:\n self.scope = current_scope_id()\n else:\n unimplemented_v2(gb_type='Unsupported SourceType', context=f'MutationType.__init__ {self} {typ}', explanation=f'Dynamo does not support the type `{typ}`', hints=['This branch is not supposed to be reachable.', *graph_break_hints.DYNAMO_BUG])", + "docstring": "Base class for Variable.mutation_type. It encodes information about 1. The type of mutation Dynamo allows on the variable. 2. Whether the value represented by this variable already existed before Dynamo tracing.", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py", + "ast_data": "ClassDef name:MutationType FunctionDef name:__init__ arg:self arg:typ arguments arg arg If Compare Assign If Compare Assign Call Call" + }, + { + "library": "scikit-learn", + "name": "_find_matching_floating_dtype", + "source_code": "def _find_matching_floating_dtype(*arrays, xp):\n dtyped_arrays = [xp.asarray(a) for a in arrays if hasattr(a, 'dtype')]\n floating_dtypes = [a.dtype for a in dtyped_arrays if xp.isdtype(a.dtype, 'real floating')]\n if floating_dtypes:\n return xp.result_type(*floating_dtypes)\n return xp.asarray(0.0).dtype", + "docstring": "Find a suitable floating point dtype when computing with arrays. If any of the arrays are floating point, return the dtype with the highest precision by following official type promotion rules: If there are no floating point input arrays (all integral inputs for instance), return the default floating point dtype for the namespace.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", + "ast_data": "FunctionDef name:_find_matching_floating_dtype arguments arg arg Assign Call Call Assign Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "has_header", + "source_code": "def has_header(self, header):\n return header in self.headers", + "docstring": "Case-insensitive check for a header.", + "type": "method", + "file_path": "django\\django\\http\\response.py", + "ast_data": "FunctionDef name:has_header arg:self arg:header arguments arg arg Return return:yes Compare" + }, + { + "library": "pandas", + "name": "should_use_regex", + "source_code": "def should_use_regex(regex: bool, to_replace: Any) -> bool:\n if is_re(to_replace):\n regex = True\n regex = regex and is_re_compilable(to_replace)\n regex = regex and re.compile(to_replace).pattern != ''\n return regex", + "docstring": "Decide whether to treat as a regular expression.", + "type": "function", + "file_path": "pandas\\pandas\\core\\array_algos\\replace.py", + "ast_data": "FunctionDef name:should_use_regex arg:regex arg:to_replace arguments arg arg If Call Assign Assign BoolOp Call Assign BoolOp Compare Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, coef_init=None, intercept_init=None):\n self._more_validate_params()\n lr = 'pa1' if self.loss == 'hinge' else 'pa2'\n return self._fit(X, y, alpha=1.0, C=self.C, 
loss='hinge', learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init)", + "docstring": "Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. coef_init : ndarray of shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : ndarray of shape (n_classes,) The initial intercept to warm-start the optimization. Returns ------- self : object Fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_passive_aggressive.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:coef_init arg:intercept_init arguments arg arg arg arg arg Call Assign Compare Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "loglog", + "source_code": "@_docstring.interpd\ndef loglog(self, *args, **kwargs):\n dx = {k: v for k, v in kwargs.items() if k in ['base', 'subs', 'nonpositive', 'basex', 'subsx', 'nonposx']}\n self.set_xscale('log', **dx)\n dy = {k: v for k, v in kwargs.items() if k in ['base', 'subs', 'nonpositive', 'basey', 'subsy', 'nonposy']}\n self.set_yscale('log', **dy)\n return self.plot(*args, **{k: v for k, v in kwargs.items() if k not in {*dx, *dy}})", + "docstring": "Make a plot with log scaling on both the x- and y-axis. Call signatures:: loglog([x], y, [fmt], data=None, **kwargs) loglog([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs) This is just a thin wrapper around which additionally changes both the x-axis and the y-axis to log scaling. All the concepts and parameters of plot can be used here as well. The additional parameters *base*, *subs* and *nonpositive* control the x/y-axis properties. They are just forwarded to and . To use different properties on the x-axis and the y-axis, use e.g. 
`.Axes.set_xscale.Axes.set_yscale.plot.Line2D` Objects representing the plotted data.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:loglog arg:self arguments arg arg arg Assign Call Compare Call Assign Call Compare Call Return return:yes Call Call Compare" + }, + { + "library": "pytorch", + "name": "build", + "source_code": "@staticmethod\ndef build(kernel: CppKernel):\n itervars = kernel.itervars\n ranges = kernel.ranges\n reduction_depth = kernel.reduction_depth\n assert reduction_depth is not None\n loops: Optional[list[LoopLevel]] = None\n for loop_idx, (var, size) in enumerate(zip(itervars, ranges)):\n loop = LoopLevel(var, size)\n if not loops:\n loops = [loop]\n else:\n loops.append(loop)\n if loop_idx >= reduction_depth:\n loop.is_reduction = kernel.is_reduction\n loop_nest = LoopNest(loops)\n return loop_nest", + "docstring": "Build a LoopNest with the given as the leaf", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py", + "ast_data": "FunctionDef name:build arg:kernel arguments arg Assign Assign Assign Compare For Call Call Assign Call If Assign Call If Compare Assign Assign Call Return return:yes" + }, + { + "library": "django", + "name": "sym_difference", + "source_code": "def sym_difference(self, other):\n return self._geomgen(capi.geom_sym_diff, other)", + "docstring": "Return a new geometry which is the symmetric difference of this geometry and the other.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:sym_difference arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "saver", + "source_code": "@property\ndef saver(self):\n return self._saver", + "docstring": "Return the Saver used by the supervisor. Returns: A Saver object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "FunctionDef name:saver arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "get_field_type", + "source_code": "def get_field_type(self, data_type, description):\n return self.data_types_reverse[data_type]", + "docstring": "Hook for a database backend to use the cursor description to match a Django field type to a database column. For Oracle, the column data_type on its own is insufficient to distinguish between a FloatField and IntegerField, for example.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\introspection.py", + "ast_data": "FunctionDef name:get_field_type arg:self arg:data_type arg:description arguments arg arg arg Return return:yes" + }, + { + "library": "pandas", + "name": "assign", + "source_code": "def assign(self, **kwargs) -> DataFrame:\n data = self.copy(deep=False)\n for k, v in kwargs.items():\n data[k] = com.apply_if_callable(v, data)\n return data", + "docstring": "Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : callable or Series The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. 
Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. Notes ----- Assigning multiple columns within the same `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df[\"temp_c\"] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign( ... temp_f=lambda x: x[\"temp_c\"] * 9 / 5 + 32, ... temp_k=lambda x: (x[\"temp_f\"] + 459.67) * 5 / 9, ... ) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:assign arg:self arguments arg arg Assign Call For Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_assert_all_finite", + "source_code": "def _assert_all_finite(X, allow_nan=False, msg_dtype=None, estimator_name=None, input_name=''):\n xp, is_array_api = get_namespace(X)\n if _get_config()['assume_finite']:\n return\n X = xp.asarray(X)\n if not is_array_api and X.dtype == np.dtype('object') and (not allow_nan):\n if _object_dtype_isnan(X).any():\n raise ValueError('Input contains NaN')\n if not xp.isdtype(X.dtype, ('real floating', 'complex floating')):\n return\n with np.errstate(over='ignore'):\n first_pass_isfinite = xp.isfinite(xp.sum(X))\n if first_pass_isfinite:\n return\n _assert_all_finite_element_wise(X, xp=xp, allow_nan=allow_nan, msg_dtype=msg_dtype, estimator_name=estimator_name, input_name=input_name)", + "docstring": "Like assert_all_finite, but only for ndarray.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\validation.py", + "ast_data": "FunctionDef name:_assert_all_finite arg:X arg:allow_nan arg:msg_dtype arg:estimator_name arg:input_name arguments arg arg arg arg arg Assign Call If Call Return return:no Assign Call If BoolOp Compare Call If Call Call Raise Call If Call Return return:no With Call Assign Call Call If Return return:no Call" + }, + { + "library": "seaborn", + "name": "Area", + "source_code": "@document_properties\n@dataclass\nclass Area(AreaBase, Mark):\n color: MappableColor = Mappable('C0')\n alpha: MappableFloat = Mappable(0.2)\n fill: MappableBool = Mappable(True)\n edgecolor: MappableColor = Mappable(depend='color')\n edgealpha: MappableFloat = Mappable(1)\n edgewidth: MappableFloat = Mappable(rc='patch.linewidth')\n edgestyle: MappableStyle = Mappable('-')\n baseline: MappableFloat = Mappable(0, grouping=False)\n\n def _standardize_coordinate_parameters(self, data, orient):\n dv = {'x': 'y', 'y': 'x'}[orient]\n return data.rename(columns={'baseline': f'{dv}min', dv: f'{dv}max'})\n\n def _postprocess_artist(self, artist, ax, orient):\n artist.set_linewidth(artist.get_linewidth() * 2)\n linestyle = artist.get_linestyle()\n if linestyle[1]:\n linestyle = (linestyle[0], tuple((x / 2 for x in linestyle[1])))\n artist.set_linestyle(linestyle)\n artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n if self.artist_kws.get('clip_on', True):\n artist.set_clip_box(ax.bbox)\n val_idx = ['y', 'x'].index(orient)\n artist.sticky_edges[val_idx][:] = (0, 
np.inf)", + "docstring": "A fill mark drawn from a baseline to data values. See also -------- Band : A fill mark representing an interval between values. Examples -------- .. include:: ../docstrings/objects.Area.rst", + "type": "class", + "file_path": "seaborn\\seaborn\\_marks\\area.py", + "ast_data": "ClassDef name:Area Call Call Call Call Call Call Call Call FunctionDef name:_standardize_coordinate_parameters arg:self arg:data arg:orient arguments arg arg arg Assign Return return:yes Call FunctionDef name:_postprocess_artist arg:self arg:artist arg:ax arg:orient arguments arg arg arg arg Call Call Assign Call If Assign Call Call Call Call Call If Call Call Assign Call Assign" + }, + { + "library": "matplotlib", + "name": "sharey", + "source_code": "def sharey(self, other):\n _api.check_isinstance(_AxesBase, other=other)\n if self._sharey is not None and other is not self._sharey:\n raise ValueError('y-axis is already shared')\n self._shared_axes['y'].join(self, other)\n self._sharey = other\n self.yaxis.major = other.yaxis.major\n self.yaxis.minor = other.yaxis.minor\n y0, y1 = other.get_ylim()\n self.set_ylim(y0, y1, emit=False, auto=other.get_autoscaley_on())\n self.yaxis._scale = other.yaxis._scale", + "docstring": "Share the y-axis with *other*. This is equivalent to passing `` when constructing the Axes, and cannot be used if the y-axis is already being shared with another Axes. Note that it is not possible to unshare axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:sharey arg:self arg:other arguments arg arg Call If BoolOp Compare Compare Raise Call Call Assign Assign Assign Assign Call Call Call Assign" + }, + { + "library": "matplotlib", + "name": "_format_approx", + "source_code": "def _format_approx(number, precision):\n return f'{number:.{precision}f}'.rstrip('0').rstrip('.') or '0'", + "docstring": "Format the number with at most the number of decimals given as precision. 
Remove trailing zeros and possibly the decimal point.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_format_approx arg:number arg:precision arguments arg arg Return return:yes BoolOp Call Call" + }, + { + "library": "tensorflow", + "name": "_batch_all_reduce", + "source_code": "def _batch_all_reduce(self, reduce_op, per_replica_values):\n dense_values, dense_indices, sparse_values, sparse_indices = cross_device_utils.split_by_sparsity(per_replica_values)\n if dense_values:\n dense_results = self._do_batch_all_reduce(reduce_op, dense_values)\n else:\n dense_results = []\n if sparse_values:\n sparse_results = self._do_batch_all_reduce_sparse(reduce_op, sparse_values)\n else:\n sparse_results = []\n return cross_device_utils.stitch_values(((dense_results, dense_indices), (sparse_results, sparse_indices)))", + "docstring": "All-reduce algorithm in a batch.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", + "ast_data": "FunctionDef name:_batch_all_reduce arg:self arg:reduce_op arg:per_replica_values arguments arg arg arg Assign Call If Assign Call Assign If Assign Call Assign Return return:yes Call" + }, + { + "library": "pandas", + "name": "quantile_with_mask", + "source_code": "def quantile_with_mask(values: np.ndarray, mask: npt.NDArray[np.bool_], fill_value, qs: npt.NDArray[np.float64], interpolation: str) -> np.ndarray:\n assert values.shape == mask.shape\n if values.ndim == 1:\n values = np.atleast_2d(values)\n mask = np.atleast_2d(mask)\n res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation)\n return res_values[0]\n assert values.ndim == 2\n is_empty = values.shape[1] == 0\n if is_empty:\n flat = np.full(len(qs), fill_value)\n result = np.repeat(flat, len(values)).reshape(len(values), len(qs))\n else:\n result = _nanquantile(values, qs, na_value=fill_value, mask=mask, interpolation=interpolation)\n result = np.asarray(result)\n result = result.T\n return result", + "docstring": "Compute the quantiles of the given values for each quantile in . Parameters ---------- values : np.ndarray For ExtensionArray, this is _values_for_factorize()[0] mask : np.ndarray[bool] mask = isna(values) For ExtensionArray, this is computed before calling _value_for_factorize fill_value : Scalar The value to interpret fill NA entries with For ExtensionArray, this is _values_for_factorize()[1] qs : np.ndarray[float64] interpolation : str Type of interpolation Returns ------- np.ndarray Notes ----- Assumes values is already 2D. For ExtensionArray this means np.atleast_2d has been called on _values_for_factorize()[0] Quantile is computed along axis=1.", + "type": "function", + "file_path": "pandas\\pandas\\core\\array_algos\\quantile.py", + "ast_data": "FunctionDef name:quantile_with_mask arg:values arg:mask arg:fill_value arg:qs arg:interpolation arguments arg arg arg arg arg Compare If Compare Assign Call Assign Call Assign Call Return return:yes Compare Assign Compare If Assign Call Call Assign Call Call Call Call Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "draw_if_interactive", + "source_code": "def draw_if_interactive(*args, **kwargs):\n return _get_backend_mod().draw_if_interactive(*args, **kwargs)", + "docstring": "Redraw the current figure if in interactive mode. .. 
warning:: End users will typically not have to call this function because the the interactive mode takes care of this.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:draw_if_interactive arguments arg arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "yuv_to_rgb", + "source_code": "def yuv_to_rgb(image: Tensor) -> Tensor:\n if not isinstance(image, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n if image.dim() < 3 or image.shape[-3] != 3:\n raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n y: Tensor = image[..., 0, :, :]\n u: Tensor = image[..., 1, :, :]\n v: Tensor = image[..., 2, :, :]\n r: Tensor = y + 1.14 * v\n g: Tensor = y + -0.396 * u - 0.581 * v\n b: Tensor = y + 2.029 * u\n out: Tensor = torch.stack([r, g, b], -3)\n return out", + "docstring": "Convert an YUV image to RGB. The image data is assumed to be in the range of :math: for luma (Y). The ranges of U and V are :math: and :math:, respectively. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Args: image: YUV Image to be converted to RGB with shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = yuv_to_rgb(input) # 2x3x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\yuv.py", + "ast_data": "FunctionDef name:yuv_to_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "limit_epochs", + "source_code": "@tf_export(v1=['train.limit_epochs'])\n@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)`.')\ndef limit_epochs(tensor, num_epochs=None, name=None):\n if num_epochs is None:\n return tensor\n if num_epochs <= 0:\n raise ValueError('num_epochs must be > 0 not %d.' % num_epochs)\n with ops.name_scope(name, 'limit_epochs', [tensor]) as name:\n zero64 = constant_op.constant(0, dtype=dtypes.int64)\n epochs = variable_v1.VariableV1(zero64, name='epochs', trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES])\n counter = epochs.count_up_to(num_epochs)\n with ops.control_dependencies([counter]):\n return array_ops.identity(tensor, name=name)", + "docstring": "Returns tensor times and then raises an error. Note: creates local counter . Use to initialize local variables. Args: tensor: Any . num_epochs: A positive integer (optional). If specified, limits the number of steps the output tensor may be evaluated. name: A name for the operations (optional). Returns: tensor or . Raises: ValueError: if is invalid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:limit_epochs arg:tensor arg:num_epochs arg:name arguments arg arg arg If Compare Return return:yes If Compare Raise Call With Call Assign Call Assign Call Assign Call With Call Return return:yes Call Call Call" + }, + { + "library": "kornia", + "name": "q", + "source_code": "@property\ndef q(self) -> Tensor:\n return self.data", + "docstring": "Return the underlying data with shape :math:. 
Alias for :func:", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\quaternion.py", + "ast_data": "FunctionDef name:q arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_MatMulGradAgainstFirstOnly", + "source_code": "def _MatMulGradAgainstFirstOnly(op: ops.Operation, grad):\n t_a = op.get_attr('transpose_a')\n t_b = op.get_attr('transpose_b')\n b = math_ops.conj(op.inputs[1])\n if not t_a and (not t_b):\n grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True, grad_a=True)\n elif not t_a and t_b:\n grad_a = gen_math_ops.mat_mul(grad, b, grad_a=True)\n elif t_a and (not t_b):\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True, grad_a=True)\n elif t_a and t_b:\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True, grad_a=True)\n return (grad_a, None)", + "docstring": "Gradient for MatMul, only for the first input.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_MatMulGradAgainstFirstOnly arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call If BoolOp Assign Call If BoolOp Assign Call If BoolOp Assign Call If BoolOp Assign Call Return return:yes" + }, + { + "library": "authlib", + "name": "JWEAlgorithmWithTagAwareKeyAgreement", + "source_code": "class JWEAlgorithmWithTagAwareKeyAgreement(JWEAlgorithmBase, metaclass=ABCMeta):\n\n def generate_keys_and_prepare_headers(self, enc_alg, key, sender_key, preset=None):\n raise NotImplementedError\n\n def agree_upon_key_and_wrap_cek(self, enc_alg, headers, key, sender_key, epk, cek, tag):\n raise NotImplementedError\n\n def wrap(self, enc_alg, headers, key, sender_key, preset=None):\n raise NotImplementedError\n\n def unwrap(self, enc_alg, ek, headers, key, sender_key, tag=None):\n raise NotImplementedError", + "docstring": "Interface for JWE algorithm with tag-aware key agreement (in key agreement with key wrapping mode). 
ECDH-1PU is an example of such an algorithm.", + "type": "class", + "file_path": "authlib\\authlib\\jose\\rfc7516\\models.py", + "ast_data": "ClassDef name:JWEAlgorithmWithTagAwareKeyAgreement FunctionDef name:generate_keys_and_prepare_headers arg:self arg:enc_alg arg:key arg:sender_key arg:preset arguments arg arg arg arg arg Raise FunctionDef name:agree_upon_key_and_wrap_cek arg:self arg:enc_alg arg:headers arg:key arg:sender_key arg:epk arg:cek arg:tag arguments arg arg arg arg arg arg arg arg Raise FunctionDef name:wrap arg:self arg:enc_alg arg:headers arg:key arg:sender_key arg:preset arguments arg arg arg arg arg arg Raise FunctionDef name:unwrap arg:self arg:enc_alg arg:ek arg:headers arg:key arg:sender_key arg:tag arguments arg arg arg arg arg arg arg Raise" + }, + { + "library": "numpy", + "name": "__radd__", + "source_code": "def __radd__(self, other):\n return add(other, self)", + "docstring": "Add other to self, and return a new masked array.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__radd__ arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "Angle3", + "source_code": "@_register_style(_style_list)\nclass Angle3(_Base):\n\n def __init__(self, angleA=90, angleB=0):\n self.angleA = angleA\n self.angleB = angleB\n\n def connect(self, posA, posB):\n x1, y1 = posA\n x2, y2 = posB\n cosA = math.cos(math.radians(self.angleA))\n sinA = math.sin(math.radians(self.angleA))\n cosB = math.cos(math.radians(self.angleB))\n sinB = math.sin(math.radians(self.angleB))\n cx, cy = get_intersection(x1, y1, cosA, sinA, x2, y2, cosB, sinB)\n vertices = [(x1, y1), (cx, cy), (x2, y2)]\n codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]\n return Path(vertices, codes)", + "docstring": "Creates a simple quadratic Bézier curve between two points. The middle control point is placed at the intersecting point of two lines which cross the start and end point, and have a slope of *angleA* and *angleB*, respectively.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "ClassDef name:Angle3 FunctionDef name:__init__ arg:self arg:angleA arg:angleB arguments arg arg arg Assign Assign FunctionDef name:connect arg:self arg:posA arg:posB arguments arg arg arg Assign Assign Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_prepare_video", + "source_code": "def _prepare_video(V):\n b, t, c, h, w = V.shape\n if V.dtype == np.uint8:\n V = np.float32(V) / 255.0\n\n def is_power2(num):\n return num != 0 and num & num - 1 == 0\n if not is_power2(V.shape[0]):\n len_addition = int(2 ** V.shape[0].bit_length() - V.shape[0])\n V = np.concatenate((V, np.zeros(shape=(len_addition, t, c, h, w))), axis=0)\n n_rows = 2 ** ((b.bit_length() - 1) // 2)\n n_cols = V.shape[0] // n_rows\n V = np.reshape(V, newshape=(n_rows, n_cols, t, c, h, w))\n V = np.transpose(V, axes=(2, 0, 4, 1, 5, 3))\n V = np.reshape(V, newshape=(t, n_rows * h, n_cols * w, c))\n return V", + "docstring": "Convert a 5D tensor into 4D tensor. Convesrion is done from [batchsize, time(frame), channel(color), height, width] (5D tensor) to [time(frame), new_width, new_height, channel] (4D tensor). A batch of images are spreaded to a grid, which forms a frame. e.g. 
Video with batchsize 16 will have a 4x4 grid.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\tensorboard\\_utils.py", + "ast_data": "FunctionDef name:_prepare_video arg:V arguments arg Assign If Compare Assign Call FunctionDef name:is_power2 arg:num arguments arg Return return:yes BoolOp Compare Compare If Call Assign Call Call Assign Call Call Assign Call Assign Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "enable_history_recording", + "source_code": "@contextlib.contextmanager\ndef enable_history_recording() -> Generator[None, None, None]:\n enabled = torch._C._cuda_isHistoryEnabled()\n try:\n if not enabled:\n torch.cuda.memory._record_memory_history()\n yield\n finally:\n if not enabled:\n torch.cuda.memory._record_memory_history(None)", + "docstring": "Turns on history recording in the CUDA Caching Allocator", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:enable_history_recording arguments Assign Call Try If Call If Call" + }, + { + "library": "django", + "name": "escapejs", + "source_code": "@keep_lazy(SafeString)\ndef escapejs(value):\n return mark_safe(str(value).translate(_js_escapes))", + "docstring": "Hex encode characters for use in JavaScript strings.", + "type": "function", + "file_path": "django\\django\\utils\\html.py", + "ast_data": "FunctionDef name:escapejs arg:value arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "django", + "name": "lazy_related_operation", + "source_code": "def lazy_related_operation(function, model, *related_models, **kwargs):\n models = [model] + [resolve_relation(model, rel) for rel in related_models]\n model_keys = (make_model_tuple(m) for m in models)\n apps = model._meta.apps\n return apps.lazy_model_operation(partial(function, **kwargs), *model_keys)", + "docstring": "Schedule to be called once and all have been imported and registered with the app registry. will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see for the various forms these may take. Any relative references will be resolved relative to . 
This is a convenience wrapper for - the app registry model used is the one found in .", + "type": "function", + "file_path": "django\\django\\db\\models\\fields\\related.py", + "ast_data": "FunctionDef name:lazy_related_operation arg:function arg:model arguments arg arg arg arg Assign Call Assign Call Assign Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "DictionaryLearningBenchmark", + "source_code": "class DictionaryLearningBenchmark(Transformer, Estimator, Benchmark):\n param_names = ['fit_algorithm', 'n_jobs']\n params = (['lars', 'cd'], Benchmark.n_jobs_vals)\n\n def setup_cache(self):\n super().setup_cache()\n\n def make_data(self, params):\n return _olivetti_faces_dataset()\n\n def make_estimator(self, params):\n fit_algorithm, n_jobs = params\n estimator = DictionaryLearning(n_components=15, fit_algorithm=fit_algorithm, alpha=0.1, transform_alpha=1, max_iter=20, tol=1e-16, random_state=0, n_jobs=n_jobs)\n return estimator\n\n def make_scorers(self):\n make_dict_learning_scorers(self)", + "docstring": "Benchmarks for DictionaryLearning.", + "type": "class", + "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\decomposition.py", + "ast_data": "ClassDef name:DictionaryLearningBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Return return:yes Call FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call" + }, + { + "library": "django", + "name": "_get_scheme", + "source_code": "def _get_scheme(self):\n return 'http'", + "docstring": "Hook for subclasses like WSGIRequest to implement. Return 'http' by default.", + "type": "method", + "file_path": "django\\django\\http\\request.py", + "ast_data": "FunctionDef name:_get_scheme arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "check_max_block", + "source_code": "def check_max_block(cfg: dict[str, int]):\n for var, val in cfg.items():\n block_suffix = 'BLOCK'\n if block_suffix in var:\n prefix = var.removesuffix(block_suffix)\n max_block = TRITON_MAX_BLOCK[prefix]\n assert val <= max_block, f\"'{var}' too large. Maximum: {max_block}. 
Actual: {val}.\"", + "docstring": "Check that block sizes are within the maximum allowed.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py", + "ast_data": "FunctionDef name:check_max_block arg:cfg arguments arg For Call Assign If Compare Assign Call Assign Compare" + }, + { + "library": "pytorch", + "name": "_setup_mixed_precision_params", + "source_code": "def _setup_mixed_precision_params(mixed_precision_config, root_module):\n for param in root_module.parameters():\n if hasattr(param, '_ddp_ignored') and param._ddp_ignored:\n continue\n if not hasattr(param, '_mp_param'):\n param._mp_param = torch.zeros_like(param, device=param.device, dtype=mixed_precision_config.param_dtype, requires_grad=param.requires_grad)\n _free_storage(param._mp_param)\n param._fp_param = param.data", + "docstring": "Create and free storage for the mixed precision parameters.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py", + "ast_data": "FunctionDef name:_setup_mixed_precision_params arg:mixed_precision_config arg:root_module arguments arg arg For Call If BoolOp Call If Call Assign Call Call Assign" + }, + { + "library": "matplotlib", + "name": "push", + "source_code": "def push(self, o):\n self._elements[self._pos + 1:] = [o]\n self._pos = len(self._elements) - 1\n return o", + "docstring": "Push *o* to the stack after the current position, and return *o*. Discard all later elements.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:push arg:self arg:o arguments arg arg Assign Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "set_solout", + "source_code": "def set_solout(self, solout):\n if self._integrator.supports_solout:\n self._integrator.set_solout(solout, complex=True)\n else:\n raise TypeError('selected integrator does not support solouta, choose another one')", + "docstring": "Set callable to be called at every successful integration step. Parameters ---------- solout : callable `` solout should return -1 to stop integration otherwise it should return None or 0", + "type": "method", + "file_path": "scipy\\scipy\\integrate\\_ode.py", + "ast_data": "FunctionDef name:set_solout arg:self arg:solout arguments arg arg If Call Raise Call" + }, + { + "library": "pytorch", + "name": "lp_pool1d", + "source_code": "def lp_pool1d(input: Tensor, norm_type: Union[int, float], kernel_size: int, stride: Optional[BroadcastingList1[int]]=None, ceil_mode: bool=False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(lp_pool1d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode)\n if stride is not None:\n out = avg_pool1d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)\n else:\n out = avg_pool1d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)\n return (torch.sign(out) * relu(torch.abs(out))).mul(kernel_size).pow(1.0 / norm_type)", + "docstring": "Apply a 1D power-average pooling over an input signal composed of several input planes. If the sum of all inputs to the power of is zero, the gradient is set to zero as well. 
See :class: for details.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:lp_pool1d arg:input arg:norm_type arg:kernel_size arg:stride arg:ceil_mode arguments arg arg arg arg arg If Call Return return:yes Call If Compare Assign Call Call Assign Call Call Return return:yes Call Call Call Call Call" + }, + { + "library": "scipy", + "name": "entropy", + "source_code": "def entropy(self, rowcov=1, colcov=1):\n dummy_mean = np.zeros((rowcov.shape[0], colcov.shape[0]))\n dims, _, rowcov, colcov = self._process_parameters(dummy_mean, rowcov, colcov)\n rowpsd = _PSD(rowcov, allow_singular=False)\n colpsd = _PSD(colcov, allow_singular=False)\n return self._entropy(dims, rowpsd.log_pdet, colpsd.log_pdet)", + "docstring": "Log of the matrix normal probability density function. Parameters ---------- rowcov : array_like, optional Among-row covariance matrix of the distribution (default: ``) Returns ------- entropy : float Entropy of the distribution Notes ----- %(_matnorm_doc_callparams_note)s", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:entropy arg:self arg:rowcov arg:colcov arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "increment_toplevel", + "source_code": "@staticmethod\ndef increment_toplevel(key: str, value: int=1, log_level: CompileEventLogLevel=CompileEventLogLevel.COMPILATION_METRIC):\n chromium_log = get_chromium_event_logger()\n top_event = chromium_log.get_outermost_event()\n if top_event is None:\n raise RuntimeError('No toplevel event active. Please only call this function within a metrics context/dynamo_timed.')\n CompileEventLogger.increment(top_event, log_level, key, value)", + "docstring": "Increments a value on the toplevel metric. By default, logs to metric.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:increment_toplevel arg:key arg:value arg:log_level arguments arg arg arg Assign Call Assign Call If Compare Raise Call Call" + }, + { + "library": "tensorflow", + "name": "do_encode", + "source_code": "def do_encode(self, type_spec_value, encode_fn):\n type_spec_class_name = type_spec_registry.get_name(type(type_spec_value))\n type_spec_class = struct_pb2.TypeSpecProto.REGISTERED_TYPE_SPEC\n warnings.warn('Encoding a StructuredValue with type %s; loading this StructuredValue will require that this type be imported and registered.' 
% type_spec_class_name)\n type_state = type_spec_value._serialize()\n num_flat_components = len(nest.flatten(type_spec_value._component_specs, expand_composites=True))\n encoded_type_spec = struct_pb2.StructuredValue()\n encoded_type_spec.type_spec_value.CopyFrom(struct_pb2.TypeSpecProto(type_spec_class=type_spec_class, type_state=encode_fn(type_state), type_spec_class_name=type_spec_class_name, num_flat_components=num_flat_components))\n return encoded_type_spec", + "docstring": "Returns an encoded proto for the given .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py", + "ast_data": "FunctionDef name:do_encode arg:self arg:type_spec_value arg:encode_fn arguments arg arg arg Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__setattr__", + "source_code": "def __setattr__(self, name, value):\n if name == 'lr':\n name = 'learning_rate'\n if hasattr(self, '_hyper') and name in self._hyper:\n self._set_hyper(name, value)\n else:\n super(OptimizerV2, self).__setattr__(name, value)", + "docstring": "Override setattr to support dynamic hyperparameter setting.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Compare Assign If BoolOp Call Compare Call Call Call" + }, + { + "library": "cherrypy", + "name": "get_ha1_dict_plain", + "source_code": "def get_ha1_dict_plain(user_password_dict):\n\n def get_ha1(realm, username):\n password = user_password_dict.get(username)\n if password:\n return md5_hex('%s:%s:%s' % (username, realm, password))\n return None\n return get_ha1", + "docstring": "Return a get_ha1 function which obtains a plaintext password. user_password_dict is a dictionary of the form: {username : password}. If you want a simple dictionary-based authentication scheme, with plaintext passwords, use get_ha1_dict_plain(my_userpass_dict) as the value for the get_ha1 argument to digest_auth().", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py", + "ast_data": "FunctionDef name:get_ha1_dict_plain arg:user_password_dict arguments arg FunctionDef name:get_ha1 arg:realm arg:username arguments arg arg Assign Call If Return return:yes Call Return return:no Return return:yes" + }, + { + "library": "matplotlib", + "name": "magnitude_spectrum", + "source_code": "@_api.make_keyword_only('3.10', 'Fs')\n@_preprocess_data(replace_names=['x'])\n@_docstring.interpd\ndef magnitude_spectrum(self, x, Fs=None, Fc=None, window=None, pad_to=None, sides=None, scale=None, **kwargs):\n if Fc is None:\n Fc = 0\n spec, freqs = mlab.magnitude_spectrum(x=x, Fs=Fs, window=window, pad_to=pad_to, sides=sides)\n freqs += Fc\n yunits = _api.check_getitem({None: 'energy', 'default': 'energy', 'linear': 'energy', 'dB': 'dB'}, scale=scale)\n if yunits == 'energy':\n Z = spec\n else:\n Z = 20.0 * np.log10(spec)\n line, = self.plot(freqs, Z, **kwargs)\n self.set_xlabel('Frequency')\n self.set_ylabel('Magnitude (%s)' % yunits)\n return (spec, freqs, line)", + "docstring": "Plot the magnitude spectrum. Compute the magnitude spectrum of *x*. Data is padded to a length of *pad_to* and the windowing function *window* is applied to the signal. Parameters ---------- x : 1-D array or sequence Array or sequence containing the data. 
%(Spectral)s %(Single_Spectrum)s scale : {'default', 'linear', 'dB'} The scaling of the values in the *spec*. 'linear' is no scaling. 'dB' returns the values in dB scale, i.e., the dB amplitude (20 * log10). 'default' is 'linear'. Fc : int, default: 0 The center frequency of *x*, which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. Returns ------- spectrum : 1-D array The values for the magnitude spectrum before scaling (real valued). freqs : 1-D array The frequencies corresponding to the elements in *spectrum*. line : The line created by this function. Other Parameters ---------------- data : indexable object, optional DATA_PARAMETER_PLACEHOLDER **kwargs Keyword arguments control the properties: %(Line2D:kwdoc)s See Also -------- psd Plots the power spectral density. angle_spectrum Plots the angles of the corresponding frequencies. phase_spectrum Plots the phase (unwrapped angle) of the corresponding frequencies. specgram Can plot the magnitude spectrum of segments within the signal in a colormap.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:magnitude_spectrum arg:self arg:x arg:Fs arg:Fc arg:window arg:pad_to arg:sides arg:scale arguments arg arg arg arg arg arg arg arg arg If Compare Assign Assign Call Assign Call If Compare Assign Assign Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "make_sparse_coded_signal", + "source_code": "@validate_params({'n_samples': [Interval(Integral, 1, None, closed='left')], 'n_components': [Interval(Integral, 1, None, closed='left')], 'n_features': [Interval(Integral, 1, None, closed='left')], 'n_nonzero_coefs': [Interval(Integral, 1, None, closed='left')], 'random_state': ['random_state']}, prefer_skip_nested_validation=True)\ndef make_sparse_coded_signal(n_samples, *, n_components, n_features, n_nonzero_coefs, random_state=None):\n generator = check_random_state(random_state)\n D = generator.standard_normal(size=(n_features, n_components))\n D /= np.sqrt(np.sum(D ** 2, axis=0))\n X = np.zeros((n_components, n_samples))\n for i in range(n_samples):\n idx = np.arange(n_components)\n generator.shuffle(idx)\n idx = idx[:n_nonzero_coefs]\n X[idx, i] = generator.standard_normal(size=n_nonzero_coefs)\n Y = np.dot(D, X)\n Y, D, X = (Y.T, D.T, X.T)\n return map(np.squeeze, (Y, D, X))", + "docstring": "Generate a signal as a sparse combination of dictionary elements. Returns matrices , and such that where is of shape , is of shape , and each row of has exactly non-zero elements. Read more in the :ref:. Parameters ---------- n_samples : int Number of samples to generate. n_components : int Number of components in the dictionary. n_features : int Number of features of the dataset to generate. n_nonzero_coefs : int Number of active (non-zero) coefficients in each sample. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:. Returns ------- data : ndarray of shape (n_samples, n_features) The encoded signal (Y). dictionary : ndarray of shape (n_components, n_features) The dictionary with normalized components (D). code : ndarray of shape (n_samples, n_components) The sparse code such that each column of this matrix has exactly n_nonzero_coefs non-zero items (X). 
Examples -------- >>> from sklearn.datasets import make_sparse_coded_signal >>> data, dictionary, code = make_sparse_coded_signal( ... n_samples=50, ... n_components=100, ... n_features=10, ... n_nonzero_coefs=4, ... random_state=0 ... ) >>> data.shape (50, 10) >>> dictionary.shape (100, 10) >>> code.shape (50, 100)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_samples_generator.py", + "ast_data": "FunctionDef name:make_sparse_coded_signal arg:n_samples arguments arg arg arg arg arg Assign Call Assign Call Call Call Assign Call For Call Assign Call Call Assign Assign Call Assign Call Assign Return return:yes Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "_common_pre_state_dict_hook", + "source_code": "def _common_pre_state_dict_hook(module: nn.Module, fsdp_state: _FSDPState) -> None:\n if fsdp_state._device_handle.is_available():\n fsdp_state._device_handle.synchronize()\n _lazy_init(fsdp_state, module)\n if fsdp_state._is_root:\n _reset_flat_param_grad_info_if_needed(fsdp_state._all_handles)", + "docstring": "Performs the pre-state_dict tasks shared by all state_dict types.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py", + "ast_data": "FunctionDef name:_common_pre_state_dict_hook arg:module arg:fsdp_state arguments arg arg If Call Call Call If Call" + }, + { + "library": "tensorflow", + "name": "ensure_initialized", + "source_code": "def ensure_initialized():\n context().ensure_initialized()", + "docstring": "Initialize the context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:ensure_initialized arguments Call Call" + }, + { + "library": "tensorflow", + "name": "manage_all_configs", + "source_code": "def manage_all_configs(save_results, filename):\n all_configs = get_all_configs()\n print_all_configs(all_configs[0], all_configs[1], all_configs[2])\n if save_results:\n save_to_file(all_configs[3], filename)", + "docstring": "Manages configuration detection and retrieval based on user input. Args: save_results: Boolean indicating whether to save the results to a file. filename: String that is the name of the output JSON file.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py", + "ast_data": "FunctionDef name:manage_all_configs arg:save_results arg:filename arguments arg arg Assign Call Call If Call" + }, + { + "library": "cherrypy", + "name": "is_iterator", + "source_code": "def is_iterator(obj):\n from types import GeneratorType\n if isinstance(obj, GeneratorType):\n return True\n elif not hasattr(obj, '__iter__'):\n return False\n else:\n return iter(obj) is obj", + "docstring": "Detect if the object provided implements the iterator protocol. (i.e. like a generator). This will return False for objects which are iterable, but not iterators themselves.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\__init__.py", + "ast_data": "FunctionDef name:is_iterator arg:obj arguments arg If Call Return return:yes If Call Return return:yes Return return:yes Compare Call" + }, + { + "library": "pytorch", + "name": "strip_overloads", + "source_code": "def strip_overloads(gm):\n for node in gm.graph.nodes:\n if isinstance(node.target, torch._ops.OpOverload):\n node.target = node.target.overloadpacket\n gm.recompile()", + "docstring": "Modifies the target of graph nodes in :attr: to strip overloads. 
Args: gm(fx.GraphModule): The input Fx graph module to be modified", + "type": "function", + "file_path": "pytorch\\benchmarks\\dynamo\\microbenchmarks\\operatorbench.py", + "ast_data": "FunctionDef name:strip_overloads arg:gm arguments arg For If Call Assign Call" + }, + { + "library": "django", + "name": "ExternalReferenceForbidden", + "source_code": "class ExternalReferenceForbidden(DefusedXmlException):\n\n def __init__(self, context, base, sysid, pubid):\n super().__init__()\n self.context = context\n self.base = base\n self.sysid = sysid\n self.pubid = pubid\n\n def __str__(self):\n tpl = \"ExternalReferenceForbidden(system_id='{}', public_id={})\"\n return tpl.format(self.sysid, self.pubid)", + "docstring": "Resolving an external reference is forbidden.", + "type": "class", + "file_path": "django\\django\\core\\serializers\\xml_serializer.py", + "ast_data": "ClassDef name:ExternalReferenceForbidden FunctionDef name:__init__ arg:self arg:context arg:base arg:sysid arg:pubid arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "initial_scale", + "source_code": "@property\ndef initial_scale(self):\n if isinstance(self._loss_scale, _DynamicLossScaleState):\n return self._loss_scale.initial_loss_scale\n else:\n return self._loss_scale", + "docstring": "The initial loss scale. If is False, this is the same number as , as the loss scale never changes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:initial_scale arg:self arguments arg If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_initialize_nodes_and_concrete_functions", + "source_code": "def _initialize_nodes_and_concrete_functions(self):\n self.nodes = list(self._trackable_objects)\n self.gradient_functions = []\n self.gradient_defs = []\n for obj in self.nodes:\n if obj in self._saveable_objects_map:\n for save_fn, restore_fn in self._saveable_objects_map[obj].values():\n self.node_ids[save_fn] = len(self.nodes)\n self.nodes.append(save_fn)\n self.node_ids[restore_fn] = len(self.nodes)\n self.nodes.append(restore_fn)\n self.concrete_functions = [obj for obj in self.nodes if isinstance(obj, defun.ConcreteFunction)]", + "docstring": "Creates graph with nodes for trackable objects and functions. 
Adds functions for each trackable object to and associated concrete functions to for serialization.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:_initialize_nodes_and_concrete_functions arg:self arguments arg Assign Call Assign Assign For If Compare For Call Assign Call Call Assign Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "CanReshape", + "source_code": "class CanReshape(Constraint):\n\n def __init__(self, src, target):\n self.src = src\n self.target = target\n\n def __repr__(self):\n return f'can-reshape({self.src}, {self.target})'\n\n def __eq__(self, other):\n if isinstance(other, CanReshape):\n return self.src == other.src and self.target == other.target\n else:\n return False", + "docstring": "can_reshape constraint", + "type": "class", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py", + "ast_data": "ClassDef name:CanReshape FunctionDef name:__init__ arg:self arg:src arg:target arguments arg arg arg Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "_new_shared", + "source_code": "def _new_shared(self, size, *, device=None):\n if device is None:\n device = 'cpu'\n device = torch.device(device)\n untyped_storage = torch.UntypedStorage._new_shared(size * self._element_size(), device=device)\n return TypedStorage(wrap_storage=untyped_storage, dtype=self.dtype, _internal=True)", + "docstring": "Create a new storage in shared memory with the same data type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:_new_shared arg:self arg:size arguments arg arg arg If Compare Assign Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_convert_sparse", + "source_code": "def _convert_sparse(self, y):\n outputs = [self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape)]\n assert all((isinstance(o, WrappedTensor) for o in outputs))\n if all((w.is_sparse_stacked for w in outputs)):\n return sparse_tensor.SparseTensor(*[w.t for w in outputs])\n assert not any((w.is_sparse_stacked for w in outputs)), 'Error converting SparseTensor. All components should be logically stacked, or none.'\n return self._restack_sparse_tensor_logically(*[self._unwrap_or_tile(w) for w in outputs])", + "docstring": "Returns the converted value corresponding to SparseTensor y. For SparseTensors, instead of stacking the component tensors separately, resulting in component tensors with shapes (N, m, rank), (N, m), and (N, rank) respectively for indices, values, and dense_shape (where N is the loop length and m is the number of sparse tensor values per loop iter), we want to logically stack the SparseTensors, to create a SparseTensor whose components are size (N * m, rank + 1), (N * m, ), and (rank + 1,) respectively. Here, we try to get the conversion of each component tensor. If the tensors are stacked via a sparse conversion, return the resulting SparseTensor composed of the converted components. Otherwise, the component tensors are either unstacked or stacked naively. In the latter case, we unstack the component tensors to reform loop_len SparseTensor elements, then correctly batch them. The unstacked tensors must have the same rank. 
Each dimension of each SparseTensor will expand to be the largest among all SparseTensor elements for that dimension. For example, if there are N SparseTensors of rank 3 being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i), the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)). Args: y: A tf.sparse.SparseTensor. Returns: A tf.sparse.SparseTensor that is the converted value corresponding to y.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:_convert_sparse arg:self arg:y arguments arg arg Assign Call Call Call If Call Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "OutDtypeOperator", + "source_code": "class OutDtypeOperator(HigherOrderOperator):\n\n def __init__(self) -> None:\n super().__init__('out_dtype')\n\n def __call__(self, op, output_dtype, *args):\n if not isinstance(op, torch._ops.OpOverload):\n raise ValueError(\"out_dtype's first argument must be an OpOverload\")\n if op._schema.is_mutable:\n raise ValueError(\"out_dtype's first argument needs to be a functional operator\")\n if not (len(op._schema.returns) == 1 and isinstance(op._schema.returns[0].type, torch.TensorType)):\n raise ValueError(f\"out_dtype's can only apply to ops that return a single tensorInstead got {[r.type for r in op._schema.returns]}\")\n if op not in ALLOWABLE_OPS:\n raise ValueError(f'out_dtype only allows the following operators: {ALLOWABLE_OPS}.')\n res = super().__call__(op, output_dtype, *args)\n return res", + "docstring": "The out_dtype operator takes an existing ATen functional operator, an argument, and arguments to the original operator, and executes the original operator and returns a Tensor with the precision. This operator does not mandate a compute precision so it allows the representation to not be opinionated about the exact implementation. The general implementation for all operators will be the following: 1. Promote inputs dtypes based on default PyTorch dtype promotion rules, using the dtypes of all input Tensors/Scalars and the arugument. 2. Execute the operator 3. 
Cast the output to", + "type": "class", + "file_path": "pytorch\\torch\\_higher_order_ops\\out_dtype.py", + "ast_data": "ClassDef name:OutDtypeOperator FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__call__ arg:self arg:op arg:output_dtype arguments arg arg arg arg If Call Raise Call If Raise Call If BoolOp Compare Call Call Raise Call If Compare Raise Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "sym_min", + "source_code": "def sym_min(a, b):\n if overrides.has_torch_function((a, b)):\n return overrides.handle_torch_function(sym_min, (a, b), a, b)\n if isinstance(a, (SymInt, SymFloat)):\n return a.__sym_min__(b)\n elif isinstance(b, (SymInt, SymFloat)):\n return b.__sym_min__(a)\n all_types, float_types = __all_and_float_types()\n assert isinstance(a, all_types), type(a)\n assert isinstance(b, all_types), type(b)\n if isinstance(a, float_types) or isinstance(b, float_types):\n return builtins.float(builtins.min(a, b))\n else:\n return builtins.min(a, b)", + "docstring": "SymInt-aware utility for min().", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:sym_min arg:a arg:b arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Assign Call Call Call Call Call If BoolOp Call Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_load_all_device_dumps", + "source_code": "def _load_all_device_dumps(self, partition_graphs, validate):\n device_dirs = _glob(os.path.join(self._dump_root, METADATA_FILE_PREFIX + DEVICE_TAG + '*'))\n self._device_names = []\n self._t0s = {}\n self._dump_tensor_data = {}\n self._dump_graph_file_paths = {}\n self._debug_watches = {}\n self._watch_key_to_devices = {}\n self._watch_key_to_datum = {}\n self._watch_key_to_rel_time = {}\n self._watch_key_to_dump_size_bytes = {}\n for device_dir in device_dirs:\n device_name = device_path_to_device_name(device_dir)\n self._device_names.append(device_name)\n self._load_device_dumps(device_name, device_dir)\n self._load_partition_graphs(partition_graphs, validate)\n self._calculate_t0()\n for device_name in self._device_names:\n self._create_tensor_watch_maps(device_name)", + "docstring": "Load the dump data for all devices.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:_load_all_device_dumps arg:self arg:partition_graphs arg:validate arguments arg arg arg Assign Call Call Assign Assign Assign Assign Assign Assign Assign Assign Assign For Assign Call Call Call Call Call For Call" + }, + { + "library": "pytorch", + "name": "create_cherry_pick_branch", + "source_code": "def create_cherry_pick_branch(github_actor: str, repo: GitRepo, pr: GitHubPR, commit_sha: str, onto_branch: str) -> str:\n repo.checkout(branch=onto_branch)\n repo._run_git('submodule', 'update', '--init', '--recursive')\n github_actor = re.sub('[^0-9a-zA-Z]+', '_', github_actor)\n cherry_pick_branch = f'cherry-pick-{pr.pr_num}-by-{github_actor}'\n repo.create_branch_and_checkout(branch=cherry_pick_branch)\n repo._run_git('cherry-pick', '-x', commit_sha)\n repo.push(branch=cherry_pick_branch, dry_run=False)\n return cherry_pick_branch", + "docstring": "Create a local branch and cherry pick the commit. 
Return the name of the local cherry picking branch.", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\cherry_pick.py", + "ast_data": "FunctionDef name:create_cherry_pick_branch arg:github_actor arg:repo arg:pr arg:commit_sha arg:onto_branch arguments arg arg arg arg arg Call Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "Glue", + "source_code": "class Glue(Node):\n\n def __init__(self, glue_type: _GlueSpec | T.Literal['fil', 'fill', 'filll', 'neg_fil', 'neg_fill', 'neg_filll', 'empty', 'ss']):\n super().__init__()\n if isinstance(glue_type, str):\n glue_spec = _GlueSpec._named[glue_type]\n elif isinstance(glue_type, _GlueSpec):\n glue_spec = glue_type\n else:\n raise ValueError('glue_type must be a glue spec name or instance')\n self.glue_spec = glue_spec\n\n def shrink(self) -> None:\n super().shrink()\n if self.size < NUM_SIZE_LEVELS:\n g = self.glue_spec\n self.glue_spec = g._replace(width=g.width * SHRINK_FACTOR)", + "docstring": "Most of the information in this object is stored in the underlying `` class, which is shared between multiple glue objects. (This is a memory optimization which probably doesn't matter anymore, but it's easier to stick to what TeX does.)", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", + "ast_data": "ClassDef name:Glue FunctionDef name:__init__ arg:self arg:glue_type arguments arg arg Call Call If Call Assign If Call Assign Raise Call Assign FunctionDef name:shrink arg:self arguments arg Call Call If Compare Assign Assign Call" + }, + { + "library": "scikit-learn", + "name": "split", + "source_code": "def split(self, X, y=None, groups=None):\n if groups is not None:\n warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning)\n return self._split(X)", + "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Always ignored, exists for compatibility. groups : array-like of shape (n_samples,) Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", + "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "SvcOther", + "source_code": "def SvcOther(self, control):\n from cherrypy import process\n process.bus.publish(control_codes.key_for(control))", + "docstring": "Send a command to the service.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\win32.py", + "ast_data": "FunctionDef name:SvcOther arg:self arg:control arguments arg arg Call Call" + }, + { + "library": "pandas", + "name": "_values_for_factorize", + "source_code": "def _values_for_factorize(self) -> tuple[np.ndarray, Any]:\n return (self.astype(object), np.nan)", + "docstring": "Return an array and missing value suitable for factorization. Returns ------- values : ndarray An array suitable for factorization. This should maintain order and be a supported dtype (Float64, Int64, UInt64, String, Object). By default, the extension array is cast to object dtype. na_value : object The value in to consider missing. 
This will be treated as NA in the factorization routines, so it will be coded as and not included in . By default, `pandas.util.hash_pandas_object` method. Examples -------- >>> pd.array([1, 2, 3])._values_for_factorize() (array([1, 2, 3], dtype=object), nan)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:_values_for_factorize arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "non_deterministic_ints", + "source_code": "def non_deterministic_ints(shape, dtype=dtypes.int64):\n return gen_stateful_random_ops.non_deterministic_ints(shape=shape, dtype=dtype)", + "docstring": "Non-deterministically generates some integers. This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. Args: shape: the shape of the result. dtype: (optional) the dtype of the result. Returns: a tensor whose element values are non-deterministically chosen.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:non_deterministic_ints arg:shape arg:dtype arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, maxes, mins):\n self.maxes = np.maximum(maxes, mins).astype(float)\n self.mins = np.minimum(maxes, mins).astype(float)\n self.m, = self.maxes.shape", + "docstring": "Construct a hyperrectangle.", + "type": "method", + "file_path": "scipy\\scipy\\spatial\\_kdtree.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:maxes arg:mins arguments arg arg arg Assign Call Call Assign Call Call Assign" + }, + { + "library": "scipy", + "name": "fft_mode", + "source_code": "@fft_mode.setter\ndef fft_mode(self, t: FFT_MODE_TYPE):\n if t not in (fft_mode_types := get_args(FFT_MODE_TYPE)):\n raise ValueError(f\"fft_mode='{t}' not in {fft_mode_types}!\")\n if t in {'onesided', 'onesided2X'} and np.iscomplexobj(self.win):\n raise ValueError(f\"One-sided spectra, i.e., fft_mode='{t}', \" + 'are not allowed for complex-valued windows!')\n if t == 'onesided2X' and self.scaling is None:\n raise ValueError(f\"For scaling is None, fft_mode='{t}' is invalid!Do scale_to('psd') or scale_to('magnitude')!\")\n self._fft_mode = t", + "docstring": "Set mode of FFT. Allowed values are 'twosided', 'centered', 'onesided', 'onesided2X'. See the property for more details.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", + "ast_data": "FunctionDef name:fft_mode arg:self arg:t arguments arg arg If Compare Call Raise Call If BoolOp Compare Call Raise Call If BoolOp Compare Compare Raise Call Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, obj, saveables):\n self._obj = obj\n self._saveables = saveables", + "docstring": "Constructor. Args: obj: A Trackable object. 
saveables: A list of saveables for .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:obj arg:saveables arguments arg arg arg Assign Assign" + }, + { + "library": "pytorch", + "name": "convert_shape_to_symint", + "source_code": "def convert_shape_to_symint(lst: Iterable[Union[int, sympy.Expr]]) -> list[Union[int, torch.SymInt]]:\n return [convert_to_symint(i) for i in lst]", + "docstring": "Takes a list of shapes from Inductor and converts them into symints (or just ints if all shapes are static).", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "FunctionDef name:convert_shape_to_symint arg:lst arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "from_reference", + "source_code": "@staticmethod\ndef from_reference(cls, ref_qconvt, output_scale, output_zero_point):\n qconv = cls(ref_qconvt.in_channels, ref_qconvt.out_channels, ref_qconvt.kernel_size, ref_qconvt.stride, ref_qconvt.padding, ref_qconvt.output_padding, ref_qconvt.groups, ref_qconvt.bias is not None, ref_qconvt.dilation, ref_qconvt.padding_mode, device=ref_qconvt.weight.device, dtype=ref_qconvt.weight.dtype)\n qweight = ref_qconvt.get_quantized_weight()\n qconv.set_weight_bias(qweight, ref_qconvt.bias)\n qconv.scale = float(output_scale)\n qconv.zero_point = int(output_zero_point)\n return qconv", + "docstring": "Create a (fbgemm/qnnpack) quantized module from a reference quantized module Args: ref_qconvt (Module): a reference quantized module, either produced by torch.ao.quantization utilities or provided by the user output_scale (float): scale for output Tensor output_zero_point (int): zero point for output Tensor", + "type": "method", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py", + "ast_data": "FunctionDef name:from_reference arg:cls arg:ref_qconvt arg:output_scale arg:output_zero_point arguments arg arg arg arg Assign Call Compare Assign Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "IntelCCompiler", + "source_code": "class IntelCCompiler(UnixCCompiler):\n compiler_type = 'intel'\n cc_exe = 'icc'\n cc_args = 'fPIC'\n\n def __init__(self, verbose=0, dry_run=0, force=0):\n UnixCCompiler.__init__(self, verbose, dry_run, force)\n v = self.get_version()\n mpopt = 'openmp' if v and v < '15' else 'qopenmp'\n self.cc_exe = 'icc -fPIC -fp-model strict -O3 -fomit-frame-pointer -{}'.format(mpopt)\n compiler = self.cc_exe\n if platform.system() == 'Darwin':\n shared_flag = '-Wl,-undefined,dynamic_lookup'\n else:\n shared_flag = '-shared'\n self.set_executables(compiler=compiler, compiler_so=compiler, compiler_cxx=compiler, archiver='xiar' + ' cru', linker_exe=compiler + ' -shared-intel', linker_so=compiler + ' ' + shared_flag + ' -shared-intel')", + "docstring": "A modified Intel compiler compatible with a GCC-built Python.", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\intelccompiler.py", + "ast_data": "ClassDef name:IntelCCompiler Assign Assign Assign FunctionDef name:__init__ arg:self arg:verbose arg:dry_run arg:force arguments arg arg arg arg Call Assign Call Assign BoolOp Compare Assign Call Assign If Compare Call Assign Assign Call" + }, + { + "library": "scikit-learn", + "name": "_n_features_out", + "source_code": "@property\ndef _n_features_out(self):\n return self.components_.shape[0]", + "docstring": "Number of transformed output features.", + "type": 
"method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py", + "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_check_input_dtype", + "source_code": "def _check_input_dtype(self, arg):\n if arg.dtype.base_dtype != self.dtype:\n raise TypeError('Expected argument to have dtype %s. Found: %s in tensor %s' % (self.dtype, arg.dtype, arg))", + "docstring": "Check that arg.dtype == self.dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:_check_input_dtype arg:self arg:arg arguments arg arg If Compare Raise Call" + }, + { + "library": "pytorch", + "name": "find_nodes", + "source_code": "@compatibility(is_backward_compatible=False)\ndef find_nodes(self, *, op: str, target: Optional['Target']=None, sort: bool=True):\n node_list = self._find_nodes_lookup_table.find_nodes(op=op, target=target)\n if sort:\n return sorted(node_list)\n return node_list", + "docstring": "Allows for fast query of nodes Args: op (str): the name of the operation target (Optional[Target]): the target of the node. For call_function, the target is required. For other ops, the target is optional. sort (bool): whether to return nodes in the order they appear on on the graph. Returns: Iteratable of nodes with the requested op and target.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\graph.py", + "ast_data": "FunctionDef name:find_nodes arg:self arguments arg arg arg arg Assign Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "is_language_prefix_patterns_used", + "source_code": "@functools.cache\ndef is_language_prefix_patterns_used(urlconf):\n for url_pattern in get_resolver(urlconf).url_patterns:\n if isinstance(url_pattern.pattern, LocalePrefixPattern):\n return (True, url_pattern.pattern.prefix_default_language)\n return (False, False)", + "docstring": "Return a tuple of two booleans: ( if i18n_patterns() (LocalePrefixPattern) is used in the URLconf, if the default language should be prefixed )", + "type": "function", + "file_path": "django\\django\\conf\\urls\\i18n.py", + "ast_data": "FunctionDef name:is_language_prefix_patterns_used arg:urlconf arguments arg For Call If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_process_constant", + "source_code": "def _process_constant(self, node: ast.Constant) -> None:\n if isinstance(node.value, str):\n docstring, modules = self._extract_docstring(node.value)\n if modules:\n self._exports.add_doc(exported_api.ExportedDoc.create(file_name=self._current_file, line_no=node.lineno, modules=modules, docstring=docstring))\n else:\n self.visit(node)", + "docstring": "Process top-level constant for a potential API docstring export.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py", + "ast_data": "FunctionDef name:_process_constant arg:self arg:node arguments arg arg If Call Assign Call If Call Call Call" + }, + { + "library": "matplotlib", + "name": "new_saved_frame_seq", + "source_code": "def new_saved_frame_seq(self):\n return self.new_frame_seq()", + "docstring": "Return a new sequence of saved/cached frame information.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:new_saved_frame_seq arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", 
+ "name": "diag_part", + "source_code": "def diag_part(self, name='diag_part'):\n with self._name_scope(name):\n return self._diag_part()", + "docstring": "Efficiently get the [batch] diagonal part of this operator. If this operator has shape , this returns a , of shape , where . Args: name: A name for this . Returns: diag_part: A of same as self.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:diag_part arg:self arg:name arguments arg arg With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "solvevec", + "source_code": "def solvevec(self, rhs, adjoint=False, name='solve'):\n with self._name_scope(name):\n rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(rhs, name='rhs')\n self._check_input_dtype(rhs)\n self_dim = -1 if adjoint else -2\n tensor_shape.dimension_at_index(self.shape, self_dim).assert_is_compatible_with(rhs.shape[-1])\n return self._solvevec(rhs, adjoint=adjoint)", + "docstring": "Solve single equation with best effort: . The returned will be close to an exact solution if is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: Args: rhs: with same as this operator. is treated like a [batch] vector meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility regarding batch dimensions. adjoint: Python . If , solve the system involving the adjoint of this : . name: A name scope to use for ops added by this method. Returns: with shape and same as . Raises: NotImplementedError: If or is False.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:solvevec arg:self arg:rhs arg:adjoint arg:name arguments arg arg arg arg With Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "__contains__", + "source_code": "@final\ndef __contains__(self, key) -> bool:\n return key in self._info_axis", + "docstring": "True if the key is in the info axis", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "_get_layout", + "source_code": "def _get_layout(name):\n cache = _get_layout.cache\n if not cache:\n for v in torch.__dict__.values():\n if isinstance(v, torch.layout):\n cache[str(v)] = v\n return cache[name]", + "docstring": "Get layout extension object from its string representation.", + "type": "function", + "file_path": "pytorch\\torch\\serialization.py", + "ast_data": "FunctionDef name:_get_layout arg:name arguments arg Assign If For Call If Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "squash_mask", + "source_code": "def squash_mask(self, params_to_keep: Optional[tuple[str, ...]]=None, params_to_keep_per_layer: Optional[dict[str, tuple[str, ...]]]=None, *args, **kwargs):\n for config in self.groups:\n module = config['module']\n tensor_name = config['tensor_name']\n parametrize.remove_parametrizations(module, tensor_name, leave_parametrized=True)\n sparse_params = {}\n if params_to_keep is not None:\n global_params = {k: config[k] for k in params_to_keep}\n sparse_params.update(global_params)\n if params_to_keep_per_layer is not None:\n params = params_to_keep_per_layer.get(config['module_fqn'], None)\n if params is not 
None:\n per_layer_params = {k: config[k] for k in params}\n sparse_params.update(per_layer_params)\n if sparse_params:\n module.sparse_params = sparse_params", + "docstring": "Squashes the sparse masks into the appropriate tensors. If either the or is set, the module will have a dict attached to it. Args: params_to_keep: List of keys to save in the module or a dict representing the modules and keys that will have sparsity parameters saved params_to_keep_per_layer: Dict to specify the params that should be saved for specific layers. The keys in the dict should be the module fqn, while the values should be a list of strings with the names of the variables to save in the Examples: >>> # xdoctest: +SKIP(\"locals are undefined\") >>> # Don't save any sparse params >>> sparsifier.squash_mask() >>> hasattr(model.submodule1, 'sparse_params') False >>> # Keep sparse params per layer >>> sparsifier.squash_mask( ... params_to_keep_per_layer={ ... 'submodule1.linear1': ('foo', 'bar'), ... 'submodule2.linear42': ('baz',) ... }) >>> print(model.submodule1.linear1.sparse_params) {'foo': 42, 'bar': 24} >>> print(model.submodule2.linear42.sparse_params) {'baz': 0.1} >>> # Keep sparse params for all layers >>> sparsifier.squash_mask(params_to_keep=('foo', 'bar')) >>> print(model.submodule1.linear1.sparse_params) {'foo': 42, 'bar': 24} >>> print(model.submodule2.linear42.sparse_params) {'foo': 42, 'bar': 24} >>> # Keep some sparse params for all layers, and specific ones for >>> # some other layers >>> sparsifier.squash_mask( ... params_to_keep=('foo', 'bar'), ... params_to_keep_per_layer={ ... 'submodule2.linear42': ('baz',) ... }) >>> print(model.submodule1.linear1.sparse_params) {'foo': 42, 'bar': 24} >>> print(model.submodule2.linear42.sparse_params) {'foo': 42, 'bar': 24, 'baz': 0.1}", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\base_sparsifier.py", + "ast_data": "FunctionDef name:squash_mask arg:self arg:params_to_keep arg:params_to_keep_per_layer arguments arg arg arg arg arg For Assign Assign Call Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call If Assign" + }, + { + "library": "tensorflow", + "name": "_load_layer", + "source_code": "def _load_layer(self, node_id, identifier, metadata):\n metadata = json_utils.decode(metadata)\n if node_id in self.loaded_nodes:\n node, setter = self.loaded_nodes[node_id]\n _maybe_add_serialized_attributes(node, metadata)\n config = metadata.get('config')\n if _is_graph_network(node) and generic_utils.validate_config(config):\n child_nodes = self._get_child_layer_node_ids(node_id)\n self.model_layer_dependencies[node_id] = (node, child_nodes)\n if not child_nodes:\n self._models_to_reconstruct.append(node_id)\n return (node, setter)\n obj, setter = self._revive_from_config(identifier, metadata, node_id)\n if obj is None:\n obj, setter = revive_custom_object(identifier, metadata)\n _maybe_add_serialized_attributes(obj, metadata)\n return (obj, setter)", + "docstring": "Load a single layer from a SavedUserObject proto.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", + "ast_data": "FunctionDef name:_load_layer arg:self arg:node_id arg:identifier arg:metadata arguments arg arg arg arg Assign Call If Compare Assign Call Assign Call If BoolOp Call Call Assign Call Assign If Call Return return:yes Assign Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_default_getter", + "source_code": "def 
_default_getter(name, shape, dtype, initializer=None, partition_info=None, **kwargs):\n dtype = dtypes.as_dtype(dtype)\n shape_object = tensor_shape.as_shape(shape)\n with ops.init_scope():\n if initializer is None:\n initializer, initializing_from_value = variable_scope._get_default_variable_store()._get_default_initializer(name=name, shape=shape_object, dtype=dtype)\n else:\n initializing_from_value = not callable(initializer)\n variable_dtype = dtype.base_dtype\n if initializing_from_value:\n if shape is not None:\n raise ValueError('If initializer is a constant, do not specify shape.')\n initial_value = initializer\n else:\n if isinstance(initializer, type(init_ops.Initializer)):\n initializer = initializer(dtype=dtype)\n shape_list = None if shape is None else shape_object.as_list()\n if 'partition_info' in tf_inspect.getargspec(initializer).args:\n initial_value = functools.partial(initializer, shape_list, dtype=dtype, partition_info=partition_info)\n else:\n initial_value = functools.partial(initializer, shape_list, dtype=dtype)\n return variable_v1.VariableV1(initial_value=initial_value, name=name, dtype=variable_dtype, use_resource=True, **kwargs)", + "docstring": "A pared-down version of get_variable which does not reuse variables.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:_default_getter arg:name arg:shape arg:dtype arg:initializer arg:partition_info arguments arg arg arg arg arg arg Assign Call Assign Call With Call If Compare Assign Call Call Assign Call Assign If If Compare Raise Call Assign If Call Call Assign Call Assign Compare Call If Compare Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "special_ortho_group_gen", + "source_code": "class special_ortho_group_gen(multi_rv_generic):\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__)\n\n def __call__(self, dim=None, seed=None):\n return special_ortho_group_frozen(dim, seed=seed)\n\n def _process_parameters(self, dim):\n if dim is None or not np.isscalar(dim) or dim < 0 or (dim != int(dim)):\n raise ValueError('Dimension of rotation must be specified,\\n and must be a scalar nonnegative integer.')\n return dim\n\n def rvs(self, dim, size=1, random_state=None):\n random_state = self._get_random_state(random_state)\n q = ortho_group.rvs(dim, size, random_state)\n dets = np.linalg.det(q)\n if dim:\n q[..., 0, :] /= dets[..., np.newaxis]\n return q", + "docstring": "A Special Orthogonal matrix (SO(N)) random variable. Return a random rotation matrix, drawn from the Haar distribution (the only uniform distribution on SO(N)) with a determinant of +1. The keyword specifies the dimension N. Methods ------- rvs(dim=None, size=1, random_state=None) Draw random samples from SO(N). Parameters ---------- dim : scalar Dimension of matrices seed : {None, int, np.random.RandomState, np.random.Generator}, optional Used for drawing random variates. If is , the singleton is used. If is an int, a new `seedNoneortho_groupscipy.spatial.transform.Rotation.randomdim` parameter, returning a \"frozen\" special_ortho_group random variable: >>> rv = special_ortho_group(5) >>> # Frozen object with the same methods but holding the >>> # dimension parameter fixed. 
See Also -------- ortho_group, scipy.spatial.transform.Rotation.random", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "ClassDef name:special_ortho_group_gen FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call Assign Call FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes FunctionDef name:rvs arg:self arg:dim arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Assign Call If Return return:yes" + }, + { + "library": "sphinx", + "name": "validate_html_extra_path", + "source_code": "def validate_html_extra_path(app: Sphinx, config: Config) -> None:\n html_extra_path = []\n for entry in config.html_extra_path:\n extra_path = (app.confdir / entry).resolve()\n if extra_path.exists():\n if app.outdir.drive == extra_path.drive and extra_path.is_relative_to(app.outdir):\n logger.warning(__('html_extra_path entry %r is placed inside outdir'), entry)\n else:\n html_extra_path.append(entry)\n else:\n logger.warning(__('html_extra_path entry %r does not exist'), entry)\n config.html_extra_path = html_extra_path", + "docstring": "Check html_extra_paths setting.", + "type": "function", + "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py", + "ast_data": "FunctionDef name:validate_html_extra_path arg:app arg:config arguments arg arg Assign For Assign Call If Call If BoolOp Compare Call Call Call Call Call Call Assign" + }, + { + "library": "tensorflow", + "name": "histogram_fixed_width", + "source_code": "@tf_export('histogram_fixed_width')\n@dispatch.add_dispatch_support\ndef histogram_fixed_width(values, value_range, nbins=100, dtype=dtypes.int32, name=None):\n with ops.name_scope(name, 'histogram_fixed_width', [values, value_range, nbins]) as name:\n return gen_math_ops._histogram_fixed_width(values, value_range, nbins, dtype=dtype, name=name)", + "docstring": "Return histogram of values. Given the tensor , this operation returns a rank 1 histogram counting the number of entries in that fell into every bin. The bins are equal width and determined by the arguments and . Args: values: Numeric . value_range: Shape [2] of same as . values = value_range[1] will be mapped to hist[-1]. nbins: Scalar . Number of histogram bins. dtype: dtype for returned histogram. name: A name for this operation (defaults to 'histogram_fixed_width'). Returns: A 1-D holding histogram of values. Raises: TypeError: If any unsupported dtype is provided. tf.errors.InvalidArgumentError: If value_range does not satisfy value_range[0] >> # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) ... >>> nbins = 5 >>> value_range = [0.0, 5.0] >>> new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] >>> hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) >>> hist.numpy() array([2, 1, 1, 0, 2], dtype=int32)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\histogram_ops.py", + "ast_data": "FunctionDef name:histogram_fixed_width arg:values arg:value_range arg:nbins arg:dtype arg:name arguments arg arg arg arg arg With Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_join_sycl_home", + "source_code": "def _join_sycl_home(*paths) -> str:\n if SYCL_HOME is None:\n raise OSError('SYCL runtime is not dected. 
Please setup the pytorch prerequisites for Intel GPU following the instruction in https://github.com/pytorch/pytorch?tab=readme-ov-file#intel-gpu-support or install intel-sycl-rt via pip.')\n return os.path.join(SYCL_HOME, *paths)", + "docstring": "Join paths with SYCL_HOME, or raises an error if it SYCL_HOME is not found. This is basically a lazy way of raising an error for missing SYCL_HOME only once we need to get any SYCL-specific path.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\cpp_extension.py", + "ast_data": "FunctionDef name:_join_sycl_home arguments arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "replicate", + "source_code": "def replicate(dataset, devices):\n if not isinstance(dataset, data_types.DatasetV2):\n raise TypeError(f'Invalid `dataset`. Expected a `tf.data.Dataset` object but got {type(dataset)}.')\n dataset_device = dataset._variant_tensor.device\n datasets = {}\n if len(devices) == 1 and devices[0] == dataset_device:\n datasets[devices[0]] = dataset\n return datasets\n with ops.colocate_with(dataset._variant_tensor):\n dataset = dataset._apply_debug_options()\n graph_def = dataset._as_serialized_graph(strip_device_assignment=True, external_state_policy=ExternalStatePolicy.WARN)\n for device in devices:\n ds = _RemoteDataset(graph_def, device, dataset.element_spec)\n datasets[device] = ds\n return datasets", + "docstring": "A transformation that replicates onto a list of devices. Args: dataset: A object. devices: A list of devices to replicate the dataset on. Returns: A dictionary mapping device name to a dataset on that device.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\distribute.py", + "ast_data": "FunctionDef name:replicate arg:dataset arg:devices arguments arg arg If Call Raise Call Call Assign Assign If BoolOp Compare Call Compare Assign Return return:yes With Call Assign Call Assign Call For Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "rewrite", + "source_code": "@tf_export(v1=['tpu.rewrite'])\n@traceback_utils.filter_traceback\ndef rewrite(computation: Callable[..., Any], inputs: Optional[List[List[Optional[core_types.Tensor]]]]=None, infeed_queue: Optional[tpu_feed.InfeedQueue]=None, device_assignment: Optional[device_assignment_lib.DeviceAssignment]=None, name: Optional[Text]=None, xla_options: Optional[XLAOptions]=None) -> Any:\n return replicate(computation, None if inputs is None else [inputs], infeed_queue=infeed_queue, device_assignment=device_assignment, name=name, xla_options=xla_options)[0]", + "docstring": "Rewrites for execution on a TPU system. Args: computation: A Python function that builds a computation to apply to the input. If the function takes n inputs, 'inputs' should be a list of n tensors. may return a list of operations and tensors. Tensors must come before operations in the returned list. The return value of is a list of tensors corresponding to the tensors from the output of . All s constructed during will be executed when evaluating any of the returned output tensors, not just the ones returned. inputs: A list of input tensors or (equivalent to an empty list). Each input can be a nested structure containing values that are convertible to tensors. Note that passing an N-dimension list of compatible values will result in a N-dimension list of scalar tensors rather than a single Rank-N tensors. If you need different behavior, convert part of inputs to tensors with . 
infeed_queue: If not , the from which to append a tuple of arguments as inputs to . device_assignment: if not , a describing the mapping between logical cores in the computation with physical cores in the TPU topology. May be omitted for a single-core computation, in which case the core attached to task 0, TPU device 0 is used. name: (Deprecated) Does nothing. xla_options: An instance of which indicates the options passed to XLA compiler. Use for default options. Returns: Same data structure as if computation(*inputs) is called directly with some exceptions for correctness. Exceptions include: 1) None output: a NoOp would be returned which control-depends on computation. 2) Single value output: A tuple containing the value would be returned. 3) Operation-only outputs: a NoOp would be returned which control-depends on computation. TODO(b/121383831): Investigate into removing these special cases.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py", + "ast_data": "FunctionDef name:rewrite arg:computation arg:inputs arg:infeed_queue arg:device_assignment arg:name arg:xla_options arguments arg arg arg arg arg arg Return return:yes Call Compare Call" + }, + { + "library": "django", + "name": "_get_path_info_with_parent", + "source_code": "def _get_path_info_with_parent(self, filtered_relation):\n path = []\n opts = self.remote_field.model._meta.concrete_model._meta\n parent_opts = opts.get_field(self.object_id_field_name).model._meta\n target = parent_opts.pk\n path.append(PathInfo(from_opts=self.model._meta, to_opts=parent_opts, target_fields=(target,), join_field=self.remote_field, m2m=True, direct=False, filtered_relation=filtered_relation))\n parent_field_chain = []\n while parent_opts != opts:\n field = opts.get_ancestor_link(parent_opts.model)\n parent_field_chain.append(field)\n opts = field.remote_field.model._meta\n parent_field_chain.reverse()\n for field in parent_field_chain:\n path.extend(field.remote_field.path_infos)\n return path", + "docstring": "Return the path that joins the current model through any parent models. 
The idea is that if you have a GFK defined on a parent model then we need to join the parent model first, then the child model.", + "type": "method", + "file_path": "django\\django\\contrib\\contenttypes\\fields.py", + "ast_data": "FunctionDef name:_get_path_info_with_parent arg:self arg:filtered_relation arguments arg arg Assign Assign Assign Call Assign Call Call Assign While Compare Assign Call Call Assign Call For Call Return return:yes" + }, + { + "library": "scipy", + "name": "RegularGridInterpolatorSubclass", + "source_code": "class RegularGridInterpolatorSubclass(Benchmark):\n param_names = ['ndim', 'max_coord_size', 'n_samples', 'flipped']\n params = [[2, 3, 4], [10, 40, 200], [10, 100, 1000, 10000], [1, -1]]\n\n def setup(self, ndim, max_coord_size, n_samples, flipped):\n rng = np.random.default_rng(314159)\n coord_sizes = [max_coord_size // 2 ** i for i in range(ndim)]\n self.points = [np.sort(rng.random(size=s))[::flipped] for s in coord_sizes]\n self.values = rng.random(size=coord_sizes)\n bounds = [(p.min(), p.max()) for p in self.points]\n xi = [rng.uniform(low, high, size=n_samples) for low, high in bounds]\n self.xi = np.array(xi).T\n self.interp = RegularGridInterpolatorValues(self.points, self.xi)\n\n def time_rgi_setup_interpolator(self, ndim, max_coord_size, n_samples, flipped):\n self.interp = RegularGridInterpolatorValues(self.points, self.xi)\n\n def time_rgi(self, ndim, max_coord_size, n_samples, flipped):\n self.interp(self.values)", + "docstring": "Benchmark RegularGridInterpolator with method=\"linear\".", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py", + "ast_data": "ClassDef name:RegularGridInterpolatorSubclass Assign Assign FunctionDef name:setup arg:self arg:ndim arg:max_coord_size arg:n_samples arg:flipped arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Call Assign Call Assign Call FunctionDef name:time_rgi_setup_interpolator arg:self arg:ndim arg:max_coord_size arg:n_samples arg:flipped arguments arg arg arg arg arg Assign Call FunctionDef name:time_rgi arg:self arg:ndim arg:max_coord_size arg:n_samples arg:flipped arguments arg arg arg arg arg Call" + }, + { + "library": "pandas", + "name": "_set_encoding", + "source_code": "def _set_encoding(self) -> None:\n if self._format_version < 118:\n self._encoding = 'latin-1'\n else:\n self._encoding = 'utf-8'", + "docstring": "Set string encoding which depends on file version", + "type": "method", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:_set_encoding arg:self arguments arg If Compare Assign Assign" + }, + { + "library": "tensorflow", + "name": "backing_device", + "source_code": "@property\ndef backing_device(self):\n raise NotImplementedError()", + "docstring": "Returns the name of the device holding this tensor's memory. is usually the same as , which returns the device on which the kernel of the operation that produced this tensor ran. 
However, some operations can produce tensors on a different device (e.g., an operation that executes on the GPU but produces output tensors in host memory).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:backing_device arg:self arguments arg Raise Call" + }, + { + "library": "kornia", + "name": "inverse_keypoints", + "source_code": "def inverse_keypoints(self, input: Union[Tensor, Keypoints], params: List[ParamItem], extra_args: Optional[Dict[str, Any]]=None) -> Union[Tensor, Keypoints]:\n if isinstance(input, Tensor):\n frame_num, batchsize = (input.size(0), input.size(1))\n input = Keypoints(input.view(-1, input.size(2), input.size(3)))\n input = super().inverse_keypoints(input, params, extra_args=extra_args)\n input = input.data.view(batchsize, frame_num, -1, 2)\n else:\n input = super().inverse_keypoints(input, params, extra_args=extra_args)\n return input", + "docstring": "Transform bounding boxes. Args: input: tensor with shape :math:. If input is a type, the internal shape is :math:. params: params for the sequence. extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\video.py", + "ast_data": "FunctionDef name:inverse_keypoints arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Call Assign Call Call Assign Call Call Call Call Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "call", + "source_code": "def call(self, inputs, state):\n cur_state_pos = 0\n cur_inp = inputs\n new_states = []\n for i, cell in enumerate(self._cells):\n with vs.variable_scope('cell_%d' % i):\n if self._state_is_tuple:\n if not nest.is_nested(state):\n raise ValueError('Expected state to be a tuple of length %d, but received: %s' % (len(self.state_size), state))\n cur_state = state[i]\n else:\n cur_state = array_ops.slice(state, [0, cur_state_pos], [-1, cell.state_size])\n cur_state_pos += cell.state_size\n cur_inp, new_state = cell(cur_inp, cur_state)\n new_states.append(new_state)\n new_states = tuple(new_states) if self._state_is_tuple else array_ops.concat(new_states, 1)\n return (cur_inp, new_states)", + "docstring": "Run this multi-layer cell on inputs, starting from state.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", + "ast_data": "FunctionDef name:call arg:self arg:inputs arg:state arguments arg arg arg Assign Assign Assign For Call With Call If If Call Raise Call Call Assign Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "record_exception", + "source_code": "def record_exception(self, e: BaseException) -> None:\n file = self._get_error_file_path()\n if file:\n data = {'message': {'message': f'{type(e).__name__}: {e}', 'extraInfo': {'py_callstack': traceback.format_exc(), 'timestamp': str(int(time.time()))}}}\n with open(file, 'w') as fp:\n json.dump(data, fp)", + "docstring": "Write a structured information about the exception into an error file in JSON format. 
If the error file cannot be determined, then logs the content that would have been written to the error file.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\errors\\error_handler.py", + "ast_data": "FunctionDef name:record_exception arg:self arg:e arguments arg arg Assign Call If Assign Call Call Call Call Call With Call Call" + }, + { + "library": "pytorch", + "name": "watch", + "source_code": "def watch(obj: Any, guarded_code: Any) -> None:\n ensure_patched(type(obj))\n if obj not in MutationTracker.db:\n MutationTracker.db[obj] = MutationTracker()\n tracker = MutationTracker.db[obj]\n tracker.track(guarded_code)", + "docstring": "invalidate guarded_code when obj is mutated", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\mutation_guard.py", + "ast_data": "FunctionDef name:watch arg:obj arg:guarded_code arguments arg arg Call Call If Compare Assign Call Assign Call" + }, + { + "library": "pandas", + "name": "name", + "source_code": "@property\ndef name(self) -> Hashable:\n return self._name", + "docstring": "Return Index or MultiIndex name. Returns ------- label (hashable object) The name of the Index. See Also -------- Index.set_names: Able to set new names partially and by level. Index.rename: Able to set new names partially and by level. Series.name: Corresponding Series property. Examples -------- >>> idx = pd.Index([1, 2, 3], name=\"x\") >>> idx Index([1, 2, 3], dtype='int64', name='x') >>> idx.name 'x'", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "LeakyReLU", + "source_code": "class LeakyReLU(torch.nn.LeakyReLU):\n\n def __init__(self, scale: float, zero_point: int, negative_slope: float=0.01, inplace: bool=False, device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super().__init__(negative_slope, inplace)\n self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))\n self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))\n\n def forward(self, input):\n return torch.ops.quantized.leaky_relu(input, self.negative_slope, self.inplace, self.scale, self.zero_point)\n\n def _get_name(self):\n return 'QuantizedLeakyReLU'\n\n @classmethod\n def from_float(cls, mod, use_precomputed_fake_quant=False):\n scale, zero_point = mod.activation_post_process.calculate_qparams()\n return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)\n\n @classmethod\n def from_reference(cls, mod, scale, zero_point):\n return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)", + "docstring": "This is the quantized equivalent of :class:. Args: scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor negative_slope: Controls the angle of the negative slope. 
Default: 1e-2", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\activation.py", + "ast_data": "ClassDef name:LeakyReLU FunctionDef name:__init__ arg:self arg:scale arg:zero_point arg:negative_slope arg:inplace arg:device arg:dtype arguments arg arg arg arg arg arg arg Assign Call Call Call Call Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Assign Call Return return:yes Call Call Call FunctionDef name:from_reference arg:cls arg:mod arg:scale arg:zero_point arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "kornia", + "name": "to_color_space", + "source_code": "def to_color_space(self, color_space: ColorSpace) -> Image:\n raise NotImplementedError", + "docstring": "Convert the image to a different color space.", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:to_color_space arg:self arg:color_space arguments arg arg Raise" + }, + { + "library": "tensorflow", + "name": "_prune_nodes_from_input_and_recipient_maps", + "source_code": "def _prune_nodes_from_input_and_recipient_maps(self, nodes_to_prune):\n for node in nodes_to_prune:\n del self._node_inputs[node]\n del self._node_ctrl_inputs[node]\n del self._node_recipients[node]\n del self._node_ctrl_recipients[node]", + "docstring": "Prune nodes out of input and recipient maps. Args: nodes_to_prune: ( of ) Names of the nodes to be pruned.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py", + "ast_data": "FunctionDef name:_prune_nodes_from_input_and_recipient_maps arg:self arg:nodes_to_prune arguments arg arg For" + }, + { + "library": "sphinx", + "name": "_import_classes", + "source_code": "def _import_classes(self, class_names: list[str], currmodule: str) -> Sequence[type[Any]]:\n classes: list[type[Any]] = []\n for name in class_names:\n classes.extend(import_classes(name, currmodule))\n return classes", + "docstring": "Import a list of classes.", + "type": "method", + "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py", + "ast_data": "FunctionDef name:_import_classes arg:self arg:class_names arg:currmodule arguments arg arg arg For Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "broadcast", + "source_code": "def broadcast(tensor, devices=None, *, out=None):\n tensor = _handle_complex(tensor)\n if not (devices is None) ^ (out is None):\n raise RuntimeError(f\"Exactly one of 'devices' and 'out' must be specified, but got devices={devices} and out={out}\")\n if devices is not None:\n devices = [_get_device_index(d) for d in devices]\n return torch._C._broadcast(tensor, devices)\n else:\n return torch._C._broadcast_out(tensor, out)", + "docstring": "Broadcasts a tensor to specified GPU devices. Args: tensor (Tensor): tensor to broadcast. Can be on CPU or GPU. devices (Iterable[torch.device, str or int], optional): an iterable of GPU devices, among which to broadcast. out (Sequence[Tensor], optional, keyword-only): the GPU tensors to store output results. .. note:: Exactly one of :attr: and :attr: must be specified. Returns: - If :attr: is specified, a tuple containing copies of :attr:, placed on :attr:. 
- If :attr: is specified, a tuple containing :attr: tensors, each containing a copy of :attr:.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\parallel\\comm.py", + "ast_data": "FunctionDef name:broadcast arg:tensor arg:devices arguments arg arg arg Assign Call If Compare Compare Raise Call If Compare Assign Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "and_", + "source_code": "def and_(a, b):\n a_val = a()\n if tensor_util.is_tf_type(a_val):\n return _tf_lazy_and(a_val, b)\n return _py_lazy_and(a_val, b)", + "docstring": "Functional form of \"and\". Uses lazy evaluation semantics.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py", + "ast_data": "FunctionDef name:and_ arg:a arg:b arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_make_dataset_iterator", + "source_code": "def _make_dataset_iterator(self, dataset):\n return input_lib_v1.DatasetIterator(dataset, self._input_workers, self._container_strategy())", + "docstring": "Make iterator from dataset without splitting the batch.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py", + "ast_data": "FunctionDef name:_make_dataset_iterator arg:self arg:dataset arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_is_v2_column", + "source_code": "@abc.abstractproperty\ndef _is_v2_column(self):\n pass", + "docstring": "Returns whether this FeatureColumn is fully conformant to the new API. This is needed for composition type cases where an EmbeddingColumn etc. might take in old categorical columns as input and then we want to use the old API.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py", + "ast_data": "FunctionDef name:_is_v2_column arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "get_arch_list", + "source_code": "def get_arch_list() -> list[str]:\n if not is_available():\n return []\n arch_flags = torch._C._cuda_getArchFlags()\n if arch_flags is None:\n return []\n return arch_flags.split()", + "docstring": "Return list CUDA architectures this library was compiled for.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:get_arch_list arguments If Call Return return:no Assign Call If Compare Return return:no Return return:yes Call" + }, + { + "library": "seaborn", + "name": "VarType", + "source_code": "class VarType(UserString):\n allowed = ('numeric', 'datetime', 'categorical', 'boolean', 'unknown')\n\n def __init__(self, data):\n assert data in self.allowed, data\n super().__init__(data)\n\n def __eq__(self, other):\n assert other in self.allowed, other\n return self.data == other", + "docstring": "Prevent comparisons elsewhere in the library from using the wrong name. Errors are simple assertions because users should not be able to trigger them. 
If that changes, they should be more verbose.", + "type": "class", + "file_path": "seaborn\\seaborn\\_core\\rules.py", + "ast_data": "ClassDef name:VarType Assign FunctionDef name:__init__ arg:self arg:data arguments arg arg Compare Call Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Compare Return return:yes Compare" + }, + { + "library": "pandas", + "name": "build_table_schema", + "source_code": "def build_table_schema(data: DataFrame | Series, index: bool=True, primary_key: bool | None=None, version: bool=True) -> dict[str, JSONSerializable]:\n if index is True:\n data = set_default_names(data)\n schema: dict[str, Any] = {}\n fields = []\n if index:\n if data.index.nlevels > 1:\n data.index = cast('MultiIndex', data.index)\n for level, name in zip(data.index.levels, data.index.names):\n new_field = convert_pandas_type_to_json_field(level)\n new_field['name'] = name\n fields.append(new_field)\n else:\n fields.append(convert_pandas_type_to_json_field(data.index))\n if data.ndim > 1:\n for column, s in data.items():\n fields.append(convert_pandas_type_to_json_field(s))\n else:\n fields.append(convert_pandas_type_to_json_field(data))\n schema['fields'] = fields\n if index and data.index.is_unique and (primary_key is None):\n if data.index.nlevels == 1:\n schema['primaryKey'] = [data.index.name]\n else:\n schema['primaryKey'] = data.index.names\n elif primary_key is not None:\n schema['primaryKey'] = primary_key\n if version:\n schema['pandas_version'] = TABLE_SCHEMA_VERSION\n return schema", + "docstring": "Create a Table schema from `None'primaryKey'pandas_versionTable Schema anyenumorderedordered` field. Examples -------- >>> from pandas.io.json._table_schema import build_table_schema >>> df = pd.DataFrame( ... {'A': [1, 2, 3], ... 'B': ['a', 'b', 'c'], ... 'C': pd.date_range('2016-01-01', freq='D', periods=3), ... }, index=pd.Index(range(3), name='idx')) >>> build_table_schema(df) {'fields': [{'name': 'idx', 'type': 'integer'}, {'name': 'A', 'type': 'integer'}, {'name': 'B', 'type': 'string'}, {'name': 'C', 'type': 'datetime'}], 'primaryKey': ['idx'], 'pandas_version': '1.4.0'}", + "type": "function", + "file_path": "pandas\\pandas\\io\\json\\_table_schema.py", + "ast_data": "FunctionDef name:build_table_schema arg:data arg:index arg:primary_key arg:version arguments arg arg arg arg If Compare Assign Call Assign If If Compare Assign Call For Call Assign Call Assign Call Call Call If Compare For Call Call Call Call Call Assign If BoolOp Compare If Compare Assign Assign If Compare Assign If Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "refine_field", + "source_code": "def refine_field(self, z, triinterpolator=None, subdiv=3):\n if triinterpolator is None:\n interp = matplotlib.tri.CubicTriInterpolator(self._triangulation, z)\n else:\n _api.check_isinstance(matplotlib.tri.TriInterpolator, triinterpolator=triinterpolator)\n interp = triinterpolator\n refi_tri, found_index = self.refine_triangulation(subdiv=subdiv, return_tri_index=True)\n refi_z = interp._interpolate_multikeys(refi_tri.x, refi_tri.y, tri_index=found_index)[0]\n return (refi_tri, refi_z)", + "docstring": "Refine a field defined on the encapsulated triangulation. Parameters ---------- z : (npoints,) array-like Values of the field to refine, defined at the nodes of the encapsulated triangulation. (`~matplotlib.tri.TriInterpolator~matplotlib.tri.CubicTriInterpolator~matplotlib.tri.Triangulation` The returned refined triangulation. refi_z : 1D array of length: *refi_tri* node count. 
The returned interpolated field (at *refi_tri* nodes).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trirefine.py", + "ast_data": "FunctionDef name:refine_field arg:self arg:z arg:triinterpolator arg:subdiv arguments arg arg arg arg If Compare Assign Call Call Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "_flop_count", + "source_code": "def _flop_count(idx_contraction, inner, num_terms, size_dictionary):\n overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)\n op_factor = max(1, num_terms - 1)\n if inner:\n op_factor += 1\n return overall_size * op_factor", + "docstring": "Computes the number of FLOPS in the contraction. Parameters ---------- idx_contraction : iterable The indices involved in the contraction inner : bool Does this contraction require an inner product? num_terms : int The number of terms in a contraction size_dictionary : dict The size of each of the indices in idx_contraction Returns ------- flop_count : int The total number of FLOPS required for the contraction. Examples -------- >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) 30 >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) 60", + "type": "function", + "file_path": "numpy\\numpy\\_core\\einsumfunc.py", + "ast_data": "FunctionDef name:_flop_count arg:idx_contraction arg:inner arg:num_terms arg:size_dictionary arguments arg arg arg arg Assign Call Assign Call If Return return:yes" + }, + { + "library": "pytorch", + "name": "_rewrite_tracepoint_node", + "source_code": "def _rewrite_tracepoint_node(gm: torch.fx.GraphModule):\n for node in gm.graph.nodes:\n if node.target == torch.ops.higher_order._export_tracepoint:\n if 'path' in node.kwargs:\n path = _strip_root(node.kwargs['path'])\n with gm.graph.inserting_before(node):\n new_node = gm.graph.create_node('call_function', torch.ops.higher_order._export_tracepoint, args=node.args, kwargs={'path': path, 'kind': node.kwargs['kind']})\n new_node.meta = node.meta\n node.replace_all_uses_with(new_node)\n gm.graph.erase_node(node)", + "docstring": "In-place modifiy input graph module by replacing the export tracepoint with a new node that has the same target and args, but with the _export_root stripped from path.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_rewrite_tracepoint_node arg:gm arguments arg For If Compare If Compare Assign Call With Call Assign Call Assign Call Call" + }, + { + "library": "django", + "name": "add_field", + "source_code": "def add_field(self, model, field):\n from django.db.models.expressions import Value\n if field.many_to_many and field.remote_field.through._meta.auto_created:\n self.create_model(field.remote_field.through)\n elif isinstance(field, CompositePrimaryKey):\n return\n elif field.primary_key or field.unique or (not field.null) or (self.effective_default(field) is not None) or (field.has_db_default() and (not isinstance(field.db_default, Value))):\n self._remake_table(model, create_field=field)\n else:\n super().add_field(model, field)", + "docstring": "Create a field on a model.", + "type": "method", + "file_path": "django\\django\\db\\backends\\sqlite3\\schema.py", + "ast_data": "FunctionDef name:add_field arg:self arg:model arg:field arguments arg arg arg If BoolOp Call If Call Return return:no If BoolOp Compare Call BoolOp Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "generate_partition_key", + "source_code": "def generate_partition_key(repo: 
str, doc: dict[str, Any]) -> str:\n workflow_id = doc['workflow_id']\n job_id = doc['job_id']\n test_name = doc['test_name']\n filename = doc['filename']\n hash_content = hashlib.md5(json.dumps(doc).encode('utf-8'), usedforsecurity=False).hexdigest()\n return f'{repo}/{workflow_id}/{job_id}/{test_name}/{filename}/{hash_content}'", + "docstring": "Generate an unique partition key for the document on DynamoDB", + "type": "function", + "file_path": "pytorch\\tools\\stats\\upload_dynamo_perf_stats.py", + "ast_data": "FunctionDef name:generate_partition_key arg:repo arg:doc arguments arg arg Assign Assign Assign Assign Assign Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_rebuild_sparse_tensor", + "source_code": "def _rebuild_sparse_tensor(layout, data):\n if layout == torch.sparse_coo:\n if len(data) == 3:\n indices, values, size = data\n is_coalesced = None\n else:\n indices, values, size, is_coalesced = data\n result = torch.sparse_coo_tensor(indices, values, size, check_invariants=False, is_coalesced=is_coalesced)\n _sparse_tensors_to_validate.append(result)\n return result\n elif layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:\n compressed_indices, plain_indices, values, size = data\n result = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, size, layout=layout, check_invariants=False)\n _sparse_tensors_to_validate.append(result)\n return result\n raise NotImplementedError(f'rebuilding sparse tensor for layout {layout}')", + "docstring": "Rebuilds a sparse tensor from its sparse storage representation. Args: layout (str): The sparse storage layout of the tensor. data (tuple): The tensor's sparse storage representation.", + "type": "function", + "file_path": "pytorch\\torch\\_utils.py", + "ast_data": "FunctionDef name:_rebuild_sparse_tensor arg:layout arg:data arguments arg arg If Compare If Compare Call Assign Assign Assign Assign Call Call Return return:yes If Compare Assign Assign Call Call Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, layout, inputs, constant_args=()) -> None:\n assert len(inputs) == 4\n assert len(constant_args) == 0\n super().__init__(layout, inputs, constant_args, None, op_overload=torch.ops.quantized.int4mm_packed_weight_cpu.default, cpp_kernel_name='aoti_torch_cpu__weight_int4pack_mm_cpu_tensor')", + "docstring": "inputs = [x, w, qGroupSize, qScalesAndZeros] constant_args = ()", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\mkldnn_ir.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:layout arg:inputs arg:constant_args arguments arg arg arg arg Compare Call Compare Call Call Call" + }, + { + "library": "scipy", + "name": "SpecialFunctionError", + "source_code": "class SpecialFunctionError(Exception):\n pass", + "docstring": "Exception that can be raised by special functions.", + "type": "class", + "file_path": "scipy\\scipy\\special\\_sf_error.py", + "ast_data": "ClassDef name:SpecialFunctionError" + }, + { + "library": "tensorflow", + "name": "add_run_metadata", + "source_code": "def add_run_metadata(self, run_metadata, tag, global_step=None):\n if tag in self._session_run_tags:\n raise ValueError('The provided tag was already used for this event type')\n self._session_run_tags[tag] = True\n tagged_metadata = event_pb2.TaggedRunMetadata()\n tagged_metadata.tag = tag\n tagged_metadata.run_metadata = run_metadata.SerializeToString()\n event = 
event_pb2.Event(tagged_run_metadata=tagged_metadata)\n self._add_event(event, global_step)", + "docstring": "Adds a metadata information for a single session.run() call. Args: run_metadata: A protobuf object. tag: The tag name for this metadata. global_step: Number. Optional global step counter to record with the StepStats. Raises: ValueError: If the provided tag was already used for this type of event.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py", + "ast_data": "FunctionDef name:add_run_metadata arg:self arg:run_metadata arg:tag arg:global_step arguments arg arg arg arg If Compare Raise Call Assign Assign Call Assign Assign Call Assign Call Call" + }, + { + "library": "virtualenv", + "name": "_validate", + "source_code": "def _validate(self):\n pass", + "docstring": "no op.", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\config\\convert.py", + "ast_data": "FunctionDef name:_validate arg:self arguments arg" + }, + { + "library": "authlib", + "name": "update_client", + "source_code": "def update_client(self, client, client_metadata, request):\n raise NotImplementedError()", + "docstring": "Update the client in the database. Developers MUST implement this method in subclass:: def update_client(self, client, client_metadata, request): client.set_client_metadata( {**client.client_metadata, **client_metadata} ) client.save() return client :param client: the instance of OAuth client :param client_metadata: a dict of the client claims to update :param request: formatted request instance :return: client instance", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py", + "ast_data": "FunctionDef name:update_client arg:self arg:client arg:client_metadata arg:request arguments arg arg arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "axes", + "source_code": "@property\ndef axes(self):\n return self._localaxes[:]", + "docstring": "List of Axes in the SubFigure. You can access and modify the Axes in the SubFigure through this list. Modifying this list has no effect. Instead, use , or to add or remove an Axes. Note: The property and method are equivalent.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:axes arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "__call__", + "source_code": "def __call__(self, x, *args):\n self._compute_if_needed(x, *args)\n return self._value", + "docstring": "returns the function value", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_optimize.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg arg Call Return return:yes" + }, + { + "library": "kornia", + "name": "get_optical_pose_base", + "source_code": "def get_optical_pose_base(pinholes: Tensor) -> Tensor:\n if not (len(pinholes.shape) == 2 and pinholes.shape[1] == 12):\n raise AssertionError(pinholes.shape)\n raise NotImplementedError", + "docstring": "Compute extrinsic transformation matrices for pinholes. Args: pinholes: tensor of form [fx fy cx cy h w rx ry rz tx ty tz] of size (N, 12). 
Returns: tensor of extrinsic transformation matrices of size (N, 4, 4).", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:get_optical_pose_base arg:pinholes arguments arg If BoolOp Compare Call Compare Raise Call Raise" + }, + { + "library": "tensorflow", + "name": "_substitute_quantized_function_name_template", + "source_code": "def _substitute_quantized_function_name_template(module: str) -> str:\n compiled_regex = re.compile('GenerateQuantizedFunctionName(\\\\([\\\\w\\\\s\\\\\\'\\\\\"\\\\[\\\\],]+\\\\))')\n while True:\n func_match = re.search(compiled_regex, module)\n if func_match is None:\n break\n argument_string = func_match.group(1)\n if not argument_string.endswith(',)'):\n argument_string = argument_string[:-1] + ',)'\n arguments = ast.literal_eval(argument_string)\n if len(arguments) < 1 or len(arguments) > 2:\n raise ValueError('Wrong number of arguments to GenerateQuantizedFunctionName')\n quantized_ops = arguments[0]\n if not quantized_ops:\n raise ValueError('The quantized_ops list must not be empty')\n function_name = 'quantized_{}'.format(_format_snake_case_op_name(quantized_ops[0]))\n if len(quantized_ops) > 1:\n function_name += '_with_{}'.format(_format_snake_case_op_name(quantized_ops[1]))\n if len(quantized_ops) > 1:\n for quantized_op in quantized_ops[2:]:\n function_name += '_and_{}'.format(_format_snake_case_op_name(quantized_op))\n suffix = '_fn'\n if len(arguments) > 1 and arguments[1] == 'f32':\n suffix = '_float_output_fn'\n function_name += suffix\n module = re.sub(compiled_regex, function_name, module, count=1)\n return module", + "docstring": "Generates the quantized function name.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\gen_quantized_function_library.py", + "ast_data": "FunctionDef name:_substitute_quantized_function_name_template arg:module arguments arg Assign Call While Assign Call If Compare Assign Call If Call Assign Assign Call If BoolOp Compare Call Compare Call Raise Call Assign If Raise Call Assign Call Call If Compare Call Call Call If Compare Call For Call Call Assign If BoolOp Compare Call Compare Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "allow_in_graph", + "source_code": "def allow_in_graph(fn):\n if isinstance(fn, (list, tuple)):\n return [allow_in_graph(x) for x in fn]\n assert callable(fn), 'allow_in_graph expects a callable'\n if trace_rules.lookup_callable(fn) != variables.TorchInGraphFunctionVariable:\n fn_id = id(fn)\n trace_rules._disallowed_callable_ids.remove(fn_id)\n trace_rules._allowed_callable_ids.add(fn_id)\n\n def deregister():\n trace_rules._allowed_callable_ids.remove(fn_id)\n weakref.finalize(fn, deregister)\n return fn", + "docstring": "Tells the compiler frontend (Dynamo) to skip symbolic introspection of the function and instead directly write it to the graph when encountered. 
See :func:'s docstring for the full documentation WARNING: this API can be a footgun, please read the documentation carefully.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\decorators.py", + "ast_data": "FunctionDef name:allow_in_graph arg:fn arguments arg If Call Return return:yes Call Call If Compare Call Assign Call Call Call FunctionDef name:deregister arguments Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "pick_loop_order", + "source_code": "def pick_loop_order(stride_lengths: list[list[int]], sizes: Sequence[sympy.Expr], priority_idx: tuple[int, ...]=()) -> list[int]:\n\n @functools.cmp_to_key\n def index_cmp(a: int, b: int) -> int:\n if sizes[a] == 1 or sizes[b] == 1:\n return cmp(sizes[a] == 1, sizes[b] == 1)\n stride_len_a = [abs(sl[a]) for sl in stride_lengths]\n stride_len_b = [abs(sl[b]) for sl in stride_lengths]\n a_first = sum((sl_b == 0 or sl_a < sl_b for sl_a, sl_b in zip(stride_len_a, stride_len_b)))\n b_first = sum((sl_a == 0 or sl_b < sl_a for sl_a, sl_b in zip(stride_len_a, stride_len_b)))\n if a_first > b_first:\n return -1\n if b_first > a_first:\n return 1\n return cmp(b, a)\n order = list(reversed(range(len(stride_lengths[0]))))\n if len(priority_idx) > 0:\n stride_lengths = [stride_lengths[pi] for pi in priority_idx]\n if config.pick_loop_orders:\n order.sort(key=index_cmp)\n return order", + "docstring": "A heuristic to decide loop iteration orders. This has not been well tuned and may be something we should autotune.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:pick_loop_order arg:stride_lengths arg:sizes arg:priority_idx arguments arg arg arg FunctionDef name:index_cmp arg:a arg:b arguments arg arg If BoolOp Compare Compare Return return:yes Call Compare Compare Assign Call Assign Call Assign Call BoolOp Compare Compare Call Assign Call BoolOp Compare Compare Call If Compare Return return:yes If Compare Return return:yes Return return:yes Call Assign Call Call Call Call If Compare Call Assign If Call Return return:yes" + }, + { + "library": "scipy", + "name": "_skip_if_lti", + "source_code": "def _skip_if_lti(arg):\n if isinstance(arg, tuple):\n return arg\n else:\n return (None,)", + "docstring": "Handle arg overloads. ATM, only pass tuples through. Consider updating when cupyx.lti class is supported.", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_delegators.py", + "ast_data": "FunctionDef name:_skip_if_lti arg:arg arguments arg If Call Return return:yes Return return:no" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, aux_trans, extreme_finder=None, grid_locator1=None, grid_locator2=None, tick_formatter1=None, tick_formatter2=None):\n super().__init__()\n self._grid_info = None\n self.grid_finder = GridFinder(aux_trans, extreme_finder, grid_locator1, grid_locator2, tick_formatter1, tick_formatter2)", + "docstring": "Parameters ---------- aux_trans : or tuple[Callable, Callable] The transform from curved coordinates to rectilinear coordinate: either a instance (which provides also its inverse), or a pair of callables `` that define the transform and its inverse. The callables should have signature:: x_rect, y_rect = trans(x_curved, y_curved) x_curved, y_curved = inv_trans(x_rect, y_rect) extreme_finder grid_locator1, grid_locator2 Grid locators for each axis. 
tick_formatter1, tick_formatter2 Tick formatters for each axis.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_helper_curvelinear.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:aux_trans arg:extreme_finder arg:grid_locator1 arg:grid_locator2 arg:tick_formatter1 arg:tick_formatter2 arguments arg arg arg arg arg arg arg Call Call Assign Assign Call" + }, + { + "library": "django", + "name": "referenced_base_fields", + "source_code": "@cached_property\ndef referenced_base_fields(self):\n from django.db.models.sql import query\n return {child.split(LOOKUP_SEP, 1)[0] for child in query.get_children_from_q(self)}", + "docstring": "Retrieve all base fields referenced directly or through F expressions excluding any fields referenced through joins.", + "type": "method", + "file_path": "django\\django\\db\\models\\query_utils.py", + "ast_data": "FunctionDef name:referenced_base_fields arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "ihfft", + "source_code": "@array_function_dispatch(_fft_dispatcher)\ndef ihfft(a, n=None, axis=-1, norm=None, out=None):\n a = asarray(a)\n if n is None:\n n = a.shape[axis]\n new_norm = _swap_direction(norm)\n out = rfft(a, n, axis, norm=new_norm, out=out)\n return conjugate(out, out=out)", + "docstring": "Compute the inverse FFT of a signal that has Hermitian symmetry. Parameters ---------- a : array_like Input array. n : int, optional Length of the inverse FFT, the number of points along transformation axis in the input to use. If is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros. If is not given, the length of the input along the axis specified by is used. axis : int, optional Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {\"backward\", \"ortho\", \"forward\"}, optional Normalization mode (see ). Default is \"backward\". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. .. versionadded:: 1.20.0 The \"backward\", \"forward\" values were added. out : complex ndarray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. .. versionadded:: 2.0.0 Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axis indicated by , or the last one if is not specified. The length of the transformed axis is `hfftihfftrfftirffthfft`, within roundoff error. 
Examples -------- >>> import numpy as np >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) >>> np.fft.ifft(spectrum) array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary >>> np.fft.ihfft(spectrum) array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary", + "type": "function", + "file_path": "numpy\\numpy\\fft\\_pocketfft.py", + "ast_data": "FunctionDef name:ihfft arg:a arg:n arg:axis arg:norm arg:out arguments arg arg arg arg arg Assign Call If Compare Assign Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "field_references", + "source_code": "def field_references(model_tuple, field, reference_model_tuple, reference_field_name=None, reference_field=None):\n remote_field = field.remote_field\n if not remote_field:\n return False\n references_to = None\n references_through = None\n if resolve_relation(remote_field.model, *model_tuple) == reference_model_tuple:\n to_fields = getattr(field, 'to_fields', None)\n if reference_field_name is None or to_fields is None or (None in to_fields and (reference_field is None or reference_field.primary_key)) or (reference_field_name in to_fields):\n references_to = (remote_field, to_fields)\n through = getattr(remote_field, 'through', None)\n if through and resolve_relation(through, *model_tuple) == reference_model_tuple:\n through_fields = remote_field.through_fields\n if reference_field_name is None or through_fields is None or reference_field_name in through_fields:\n references_through = (remote_field, through_fields)\n if not (references_to or references_through):\n return False\n return FieldReference(references_to, references_through)", + "docstring": "Return either False or a FieldReference if references provided context. False positives can be returned if is provided without because of the introspection limitation it incurs. 
This should not be an issue when this function is used to determine whether or not an optimization can take place.", + "type": "function", + "file_path": "django\\django\\db\\migrations\\utils.py", + "ast_data": "FunctionDef name:field_references arg:model_tuple arg:field arg:reference_model_tuple arg:reference_field_name arg:reference_field arguments arg arg arg arg arg Assign If Return return:yes Assign Assign If Compare Call Assign Call If BoolOp Compare Compare BoolOp Compare BoolOp Compare Compare Assign Assign Call If BoolOp Compare Call Assign If BoolOp Compare Compare Compare Assign If BoolOp Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "geos_version", + "source_code": "def geos_version(self):\n return self._get_spatialite_func('geos_version()')", + "docstring": "Return the version of GEOS used by SpatiaLite as a string.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py", + "ast_data": "FunctionDef name:geos_version arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_size", + "source_code": "def get_size(self):\n return self._size", + "docstring": "Get the text size.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\textpath.py", + "ast_data": "FunctionDef name:get_size arg:self arguments arg Return return:yes" + }, + { + "library": "sphinx", + "name": "preserve_original_messages", + "source_code": "def preserve_original_messages(self) -> None:\n raise NotImplementedError", + "docstring": "Preserve original translatable messages.", + "type": "method", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "FunctionDef name:preserve_original_messages arg:self arguments arg Raise" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y=None):\n pass", + "docstring": "Return the score of the model on the data . Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : Ignored Not used, present for API consistency by convention. Returns ------- score : float", + "type": "method", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg" + }, + { + "library": "pytorch", + "name": "_dispatch_opset_version", + "source_code": "def _dispatch_opset_version(target: OpsetVersion, registered_opsets: Collection[OpsetVersion]) -> Optional[OpsetVersion]:\n if not registered_opsets:\n return None\n descending_registered_versions = sorted(registered_opsets, reverse=True)\n if target >= _constants.ONNX_BASE_OPSET:\n for version in descending_registered_versions:\n if version <= target:\n return version\n return None\n for version in reversed(descending_registered_versions):\n if target <= version <= _constants.ONNX_BASE_OPSET:\n return version\n return None", + "docstring": "Finds the registered opset given a target opset version and the available opsets. Args: target: The target opset version. registered_opsets: The available opsets. 
Returns: The registered opset version.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py", + "ast_data": "FunctionDef name:_dispatch_opset_version arg:target arg:registered_opsets arguments arg arg If Return return:no Assign Call If Compare For If Compare Return return:yes Return return:no For Call If Compare Return return:yes Return return:no" + }, + { + "library": "pytorch", + "name": "check_sparse_tensor_invariants", + "source_code": "class check_sparse_tensor_invariants:\n\n @staticmethod\n def is_enabled():\n return torch._C._check_sparse_tensor_invariants()\n\n @staticmethod\n def enable():\n torch._C._set_check_sparse_tensor_invariants(True)\n\n @staticmethod\n def disable():\n torch._C._set_check_sparse_tensor_invariants(False)\n\n def __init__(self, enable=True):\n self.state = enable\n self.saved_state: Optional[bool] = None\n\n def __enter__(self):\n if self.saved_state is not None:\n raise RuntimeError('This context manager instance is already activated. Use a different context manager instance for context nesting.')\n self.saved_state = self.is_enabled()\n torch._C._set_check_sparse_tensor_invariants(self.state)\n\n def __exit__(self, type, value, traceback):\n assert self.saved_state is not None\n torch._C._set_check_sparse_tensor_invariants(self.saved_state)\n self.saved_state = None\n\n def __call__(self, mth):\n\n def test_mth(*args, **kwargs):\n with type(self)(self.state):\n return mth(*args, **kwargs)\n return test_mth", + "docstring": "A tool to control checking sparse tensor invariants. The following options exists to manage sparsr tensor invariants checking in sparse tensor construction: 1. Using a context manager: .. code:: python with torch.sparse.check_sparse_tensor_invariants(): run_my_model() 2. Using a procedural approach: .. code:: python prev_checks_enabled = torch.sparse.check_sparse_tensor_invariants.is_enabled() torch.sparse.check_sparse_tensor_invariants.enable() run_my_model() if not prev_checks_enabled: torch.sparse.check_sparse_tensor_invariants.disable() 3. Using function decoration: .. code:: python @torch.sparse.check_sparse_tensor_invariants() def run_my_model(): ... run_my_model() 4. 
Using `crow_indices[..., -1] == nnz` is not satisfied.", + "type": "class", + "file_path": "pytorch\\torch\\sparse\\__init__.py", + "ast_data": "ClassDef name:check_sparse_tensor_invariants FunctionDef name:is_enabled arguments Return return:yes Call FunctionDef name:enable arguments Call FunctionDef name:disable arguments Call FunctionDef name:__init__ arg:self arg:enable arguments arg arg Assign FunctionDef name:__enter__ arg:self arguments arg If Compare Raise Call Assign Call Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Compare Call Assign FunctionDef name:__call__ arg:self arg:mth arguments arg arg FunctionDef name:test_mth arguments arg arg With Call Call Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "asend", + "source_code": "async def asend(self, sender, **named):\n if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:\n return []\n sync_receivers, async_receivers = self._live_receivers(sender)\n if sync_receivers:\n\n @sync_to_async\n def sync_send():\n responses = []\n for receiver in sync_receivers:\n response = receiver(signal=self, sender=sender, **named)\n responses.append((receiver, response))\n return responses\n else:\n\n async def sync_send():\n return []\n responses, async_responses = await asyncio.gather(sync_send(), asyncio.gather(*(receiver(signal=self, sender=sender, **named) for receiver in async_receivers)))\n responses.extend(zip(async_receivers, async_responses))\n return responses", + "docstring": "Send signal from sender to all connected receivers in async mode. All sync receivers will be wrapped by sync_to_async() If any receiver raises an error, the error propagates back through send, terminating the dispatch loop. So it's possible that all receivers won't be called if an error is raised. If any receivers are synchronous, they are grouped and called behind a sync_to_async() adaption before executing any asynchronous receivers. If any receivers are asynchronous, they are grouped and executed concurrently with asyncio.gather(). Arguments: sender The sender of the signal. Either a specific object or None. named Named arguments which will be passed to receivers. 
Return a list of tuple pairs [(receiver, response), ...].", + "type": "method", + "file_path": "django\\django\\dispatch\\dispatcher.py", + "ast_data": "AsyncFunctionDef name:asend arg:self arg:sender arguments arg arg arg If BoolOp Compare Call Return return:no Assign Call If FunctionDef name:sync_send arguments Assign For Assign Call Call Return return:yes AsyncFunctionDef name:sync_send arguments Return return:no Assign Call Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "inference_fn", + "source_code": "@property\ndef inference_fn(self):\n return self._inference_function", + "docstring": "Return the inference function associated with this ConcreteFunction.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:inference_fn arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_finish_prob_for_one_fiber", + "source_code": "def _finish_prob_for_one_fiber(self, y, x, ildj, event_ndims):\n x = self._maybe_rotate_dims(x, rotate_right=True)\n prob = self.distribution.prob(x)\n if self._is_maybe_event_override:\n prob = math_ops.reduce_prod(prob, self._reduce_event_indices)\n prob *= math_ops.exp(math_ops.cast(ildj, prob.dtype))\n if self._is_maybe_event_override and isinstance(event_ndims, int):\n prob.set_shape(array_ops.broadcast_static_shape(y.get_shape().with_rank_at_least(1)[:-event_ndims], self.batch_shape))\n return prob", + "docstring": "Finish computation of prob on one element of the inverse image.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:_finish_prob_for_one_fiber arg:self arg:y arg:x arg:ildj arg:event_ndims arguments arg arg arg arg arg Assign Call Assign Call If Assign Call Call Call If BoolOp Call Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "trace", + "source_code": "def trace(self, offset=0):\n return self.diagonal(k=offset).sum()", + "docstring": "Returns the sum along diagonals of the sparse array/matrix. Parameters ---------- offset : int, optional Which diagonal to get, corresponding to elements a[i, i+offset]. Default: 0 (the main diagonal).", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_base.py", + "ast_data": "FunctionDef name:trace arg:self arg:offset arguments arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, language, domain=None, localedirs=None):\n gettext_module.GNUTranslations.__init__(self)\n if domain is not None:\n self.domain = domain\n self.__language = language\n self.__to_language = to_language(language)\n self.__locale = to_locale(language)\n self._catalog = None\n self.plural = lambda n: int(n != 1)\n if self.domain == 'django':\n if localedirs is not None:\n warnings.warn(\"localedirs is ignored when domain is 'django'.\", RuntimeWarning)\n localedirs = None\n self._init_translation_catalog()\n if localedirs:\n for localedir in localedirs:\n translation = self._new_gnu_trans(localedir)\n self.merge(translation)\n else:\n self._add_installed_apps_translations()\n self._add_local_translations()\n if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and (self._catalog is None):\n raise OSError('No translation files found for default language %s.' 
% settings.LANGUAGE_CODE)\n self._add_fallback(localedirs)\n if self._catalog is None:\n self._catalog = TranslationCatalog()", + "docstring": "Create a GNUTranslations() using many locale directories", + "type": "method", + "file_path": "django\\django\\utils\\translation\\trans_real.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:language arg:domain arg:localedirs arguments arg arg arg arg Call If Compare Assign Assign Assign Call Assign Call Assign Assign arguments arg Call Compare If Compare If Compare Call Assign Call If For Assign Call Call Call Call If BoolOp Compare Compare Compare Raise Call Call If Compare Assign Call" + }, + { + "library": "tensorflow", + "name": "read_up_to", + "source_code": "def read_up_to(self, queue, num_records, name=None):\n if isinstance(queue, tensor_lib.Tensor):\n queue_ref = queue\n else:\n queue_ref = queue.queue_ref\n if self._reader_ref.dtype == dtypes.resource:\n return gen_io_ops.reader_read_up_to_v2(self._reader_ref, queue_ref, num_records, name=name)\n else:\n old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)\n return gen_io_ops.reader_read_up_to(self._reader_ref, old_queue_op, num_records, name=name)", + "docstring": "Returns up to num_records (key, value) pairs produced by a reader. Will dequeue a work unit from queue if necessary (e.g., when the Reader needs to start reading from a new file since it has finished with the previous file). It may return less than num_records even before the last batch. Args: queue: A Queue or a mutable string Tensor representing a handle to a Queue, with string work items. num_records: Number of records to read. name: A name for the operation (optional). Returns: A tuple of Tensors (keys, values). keys: A 1-D string Tensor. values: A 1-D string Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py", + "ast_data": "FunctionDef name:read_up_to arg:self arg:queue arg:num_records arg:name arguments arg arg arg arg If Call Assign Assign If Compare Return return:yes Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "Fixed", + "source_code": "class Fixed(_Base):\n\n def __init__(self, fixed_size):\n _api.check_isinstance(Real, fixed_size=fixed_size)\n self.fixed_size = fixed_size\n\n def get_size(self, renderer):\n rel_size = 0.0\n abs_size = self.fixed_size\n return (rel_size, abs_size)", + "docstring": "Simple fixed size with absolute part = *fixed_size* and relative part = 0.", + "type": "class", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py", + "ast_data": "ClassDef name:Fixed FunctionDef name:__init__ arg:self arg:fixed_size arguments arg arg Call Assign FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "CompositeAffine2D", + "source_code": "class CompositeAffine2D(Affine2DBase):\n\n def __init__(self, a, b, **kwargs):\n if not a.is_affine or not b.is_affine:\n raise ValueError(\"'a' and 'b' must be affine transforms\")\n if a.output_dims != b.input_dims:\n raise ValueError(\"The output dimension of 'a' must be equal to the input dimensions of 'b'\")\n self.input_dims = a.input_dims\n self.output_dims = b.output_dims\n super().__init__(**kwargs)\n self._a = a\n self._b = b\n self.set_children(a, b)\n self._mtx = None\n\n @property\n def depth(self):\n return self._a.depth + self._b.depth\n\n def _iter_break_from_left_to_right(self):\n for left, right in self._a._iter_break_from_left_to_right():\n yield 
(left, right + self._b)\n for left, right in self._b._iter_break_from_left_to_right():\n yield (self._a + left, right)\n __str__ = _make_str_method('_a', '_b')\n\n def get_matrix(self):\n if self._invalid:\n self._mtx = np.dot(self._b.get_matrix(), self._a.get_matrix())\n self._inverted = None\n self._invalid = 0\n return self._mtx", + "docstring": "A composite transform formed by applying transform *a* then transform *b*. This version is an optimization that handles the case where both *a* and *b* are 2D affines.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "ClassDef name:CompositeAffine2D FunctionDef name:__init__ arg:self arg:a arg:b arguments arg arg arg arg If BoolOp Raise Call If Compare Raise Call Assign Assign Call Call Assign Assign Call Assign FunctionDef name:depth arg:self arguments arg Return return:yes FunctionDef name:_iter_break_from_left_to_right arg:self arguments arg For Call For Call Assign Call FunctionDef name:get_matrix arg:self arguments arg If Assign Call Call Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_loss_object", + "source_code": "def _get_loss_object(self, loss):\n if loss is None:\n return None\n loss = losses_mod.get(loss)\n if not isinstance(loss, losses_mod.Loss):\n loss_name = get_custom_object_name(loss)\n if loss_name is None:\n raise ValueError('Loss should be a callable, found: {}'.format(loss))\n loss = losses_mod.LossFunctionWrapper(loss, name=loss_name)\n loss._allow_sum_over_batch_size = True\n return loss", + "docstring": "Returns a object. Converts the user-supplied loss to a object. Also allows reduction to be used for this loss. Args: loss: A string, function, or object. Returns: A object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "FunctionDef name:_get_loss_object arg:self arg:loss arguments arg arg If Compare Return return:no Assign Call If Call Assign Call If Compare Raise Call Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "run_decompositions", + "source_code": "@_disable_prexisiting_fake_mode\ndef run_decompositions(self, decomp_table: Optional[dict[torch._ops.OperatorBase, Callable]]=None, decompose_custom_triton_ops: bool=False) -> 'ExportedProgram':\n _decomp_table = default_decompositions() if decomp_table is None else dict(decomp_table)\n if isinstance(_decomp_table, CustomDecompTable):\n _decomp_table = _decomp_table.materialize()\n cia_to_decomp, python_decomp_table = _split_decomp_table_to_cia_and_python_decomp(_decomp_table)\n return _decompose_exported_program(self, cia_to_decomp=cia_to_decomp, python_decomp_table=python_decomp_table, joint_loss_index=None, decompose_custom_triton_ops=decompose_custom_triton_ops)", + "docstring": "Run a set of decompositions on the exported program and returns a new exported program. By default we will run the Core ATen decompositions to get operators in the _. For now, we do not decompose joint graphs. Args: decomp_table: An optional argument that specifies decomp behaviour for Aten ops (1) If None, we decompose to core aten decompositions (2) If empty, we don't decompose any operator Some examples: If you don't want to decompose anything .. code-block:: python ep = torch.export.export(model, ...) ep = ep.run_decompositions(decomp_table={}) If you want to get a core aten operator set except for certain operator, you can do following: .. 
code-block:: python ep = torch.export.export(model, ...) decomp_table = torch.export.default_decompositions() decomp_table[your_op] = your_custom_decomp ep = ep.run_decompositions(decomp_table=decomp_table)", + "type": "method", + "file_path": "pytorch\\torch\\export\\exported_program.py", + "ast_data": "FunctionDef name:run_decompositions arg:self arg:decomp_table arg:decompose_custom_triton_ops arguments arg arg arg Assign Compare Call Call If Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_custom_objects", + "source_code": "def get_custom_objects():\n return _GLOBAL_CUSTOM_OBJECTS", + "docstring": "Retrieves a live reference to the global dictionary of custom objects. Updating and clearing custom objects using is preferred, but can be used to directly access the current collection of custom objects. Example: Returns: Global dictionary of names to classes ().", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py", + "ast_data": "FunctionDef name:get_custom_objects arguments Return return:yes" + }, + { + "library": "pandas", + "name": "_setitem_single_block", + "source_code": "def _setitem_single_block(self, indexer, value, name: str) -> None:\n from pandas import Series\n if isinstance(value, ABCSeries) and name != 'iloc' or isinstance(value, dict):\n value = self._align_series(indexer, Series(value))\n info_axis = self.obj._info_axis_number\n item_labels = self.obj._get_axis(info_axis)\n if isinstance(indexer, tuple):\n if self.ndim == len(indexer) == 2 and is_integer(indexer[1]) and com.is_null_slice(indexer[0]):\n col = item_labels[indexer[info_axis]]\n if len(item_labels.get_indexer_for([col])) == 1:\n loc = item_labels.get_loc(col)\n self._setitem_single_column(loc, value, indexer[0])\n return\n indexer = maybe_convert_ix(*indexer)\n if isinstance(value, ABCDataFrame) and name != 'iloc':\n value = self._align_frame(indexer, value)._values\n self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)", + "docstring": "_setitem_with_indexer for the case when we have a single Block.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:_setitem_single_block arg:self arg:indexer arg:value arg:name arguments arg arg arg arg If BoolOp BoolOp Call Compare Call Assign Call Call Assign Assign Call If Call If BoolOp Compare Call Call Call Assign If Compare Call Call Assign Call Call Return return:no Assign Call If BoolOp Call Compare Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "set_sharing_strategy", + "source_code": "def set_sharing_strategy(new_strategy):\n global _sharing_strategy\n assert new_strategy in _all_sharing_strategies\n _sharing_strategy = new_strategy", + "docstring": "Set the strategy for sharing CPU tensors. Args: new_strategy (str): Name of the selected strategy. 
Should be one of the values returned by :func:.", + "type": "function", + "file_path": "pytorch\\torch\\multiprocessing\\__init__.py", + "ast_data": "FunctionDef name:set_sharing_strategy arg:new_strategy arguments arg Compare Assign" + }, + { + "library": "tensorflow", + "name": "_dynamic_ragged_shape_init", + "source_code": "def _dynamic_ragged_shape_init(fields, shape, nrows, row_partitions):\n assert isinstance(fields, dict), fields\n assert isinstance(shape, tensor_shape.TensorShape), shape\n assert nrows is None or isinstance(nrows, tensor.Tensor), nrows\n assert isinstance(row_partitions, tuple), row_partitions\n rank = shape.rank\n if rank is None:\n raise TypeError(\"StructuredTensor's shape must have known rank.\")\n dtype = _find_shape_dtype(fields, nrows, row_partitions)\n if rank == 0:\n return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape(array_ops.zeros((0,), dtype=dtype))\n if rank == 1:\n alt_value = shape[0]\n if isinstance(alt_value, tensor_shape.Dimension):\n alt_value = alt_value.value\n if alt_value is not None:\n nrows = alt_value\n return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape([nrows], dtype=dtype)\n return dynamic_ragged_shape.DynamicRaggedShape.from_row_partitions(row_partitions, dtype=dtype)", + "docstring": "Produce a DynamicRaggedShape for StructuredTensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor_dynamic.py", + "ast_data": "FunctionDef name:_dynamic_ragged_shape_init arg:fields arg:shape arg:nrows arg:row_partitions arguments arg arg arg arg Call Call BoolOp Compare Call Call Assign If Compare Raise Call Assign Call If Compare Return return:yes Call Call If Compare Assign If Call Assign If Compare Assign Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_fix_linecache_record", + "source_code": "def _fix_linecache_record(obj):\n if hasattr(obj, '__module__'):\n obj_file = inspect.getfile(obj)\n obj_module = obj.__module__\n loaded_modules = tuple(sys.modules.values())\n for m in loaded_modules:\n if hasattr(m, '__file__') and m.__file__ == obj_file:\n if obj_module is not m:\n linecache.updatecache(obj_file, m.__dict__)", + "docstring": "Fixes potential corruption of linecache in the presence of functools.wraps. functools.wraps modifies the target object's __module__ field, which seems to confuse linecache in special instances, for example when the source is loaded from a .par file (see This function simply triggers a call to linecache.updatecache when a mismatch was detected between the object's __module__ property and the object's source file. Args: obj: Any", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py", + "ast_data": "FunctionDef name:_fix_linecache_record arg:obj arguments arg If Call Assign Call Assign Assign Call Call For If BoolOp Call Compare If Compare Call" + }, + { + "library": "numpy", + "name": "min", + "source_code": "@array_function_dispatch(_min_dispatcher)\ndef min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue):\n return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims, initial=initial, where=where)", + "docstring": "Return the minimum of an array or minimum along an axis. Parameters ---------- a : array_like Input data. axis : None or int or tuple of ints, optional Axis or axes along which to operate. By default, flattened input is used. 
If this is a tuple of ints, the minimum is selected over multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. See :ref: for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then will not be passed through to the `ndarraykeepdims~numpy.ufunc.reduce~numpy.ufunc.reduceaaxisaxisaxis~numpy.min` argument. >>> np.min([6], initial=5) 5 >>> min([6], default=5) 6", + "type": "function", + "file_path": "numpy\\numpy\\_core\\fromnumeric.py", + "ast_data": "FunctionDef name:min arg:a arg:axis arg:out arg:keepdims arg:initial arg:where arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_check_input_partition_dims", + "source_code": "def _check_input_partition_dims(self, tensor, dims):\n if dims is None:\n return\n dims = np.array(dims)\n if (dims < 1).any():\n raise ValueError('All input partition dims must be >= 1.')\n if dims.prod() == 1:\n return\n if dims.prod() != self._device_assignment.num_cores_per_replica:\n raise ValueError('The product of each input partition dim should equal to num_cores_per_replica. (dim = {}, num_cores_per_replica = {})'.format(dims, self._device_assignment.num_cores_per_replica))\n if dims.shape[0] != tensor.shape.ndims:\n raise ValueError('Input partition dims must have the same number of dimensions as the `Tensor` to be partitioned. (tensor shape = {}, input partition dims = {}).'.format(tensor.shape.as_list(), dims))\n tensor.shape.assert_is_fully_defined()", + "docstring": "Checks that input partition dims are valid for the . Args: tensor: Input tensor for partitioning. dims: A list of integer describes how to partition the input tensor. 
Raises: ValueError: If the tensor can't be partitioned by dims or the num_cores_per_replica doesn't match the number of partitions(dims.prod()).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", + "ast_data": "FunctionDef name:_check_input_partition_dims arg:self arg:tensor arg:dims arguments arg arg arg If Compare Return return:no Assign Call If Call Compare Raise Call If Compare Call Return return:no If Compare Call Raise Call Call If Compare Raise Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "cosine_distance", + "source_code": "@tf_export(v1=['losses.cosine_distance'])\n@dispatch.add_dispatch_support\n@deprecated_args(None, 'dim is deprecated, use axis instead', 'dim')\ndef cosine_distance(labels, predictions, axis=None, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS, dim=None):\n axis = deprecated_argument_lookup('axis', axis, 'dim', dim)\n if axis is None:\n raise ValueError('You must specify argument `axis`.')\n if labels is None:\n raise ValueError('Argument `labels` must not be None.')\n if predictions is None:\n raise ValueError('Argument `predictions` must not be None.')\n with ops.name_scope(scope, 'cosine_distance_loss', (predictions, labels, weights)) as scope:\n predictions = math_ops.cast(predictions, dtype=dtypes.float32)\n labels = math_ops.cast(labels, dtype=dtypes.float32)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n radial_diffs = math_ops.multiply(predictions, labels)\n losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True)\n return compute_weighted_loss(losses, weights, scope, loss_collection, reduction=reduction)", + "docstring": "Adds a cosine-distance loss to the training procedure. Note that the function assumes that and are already unit-normalized. Args: labels: whose shape matches 'predictions' predictions: An arbitrary matrix. axis: The dimension along which the cosine distance is computed. weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which this loss will be added. reduction: Type of reduction to apply to loss. dim: The old (deprecated) name for . Returns: Weighted loss float . If is , this has the same shape as ; otherwise, it is scalar. Raises: ValueError: If shape doesn't match shape, or , , or is . @compatibility(eager) The argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a . 
@end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py", + "ast_data": "FunctionDef name:cosine_distance arg:labels arg:predictions arg:axis arg:weights arg:scope arg:loss_collection arg:reduction arg:dim arguments arg arg arg arg arg arg arg arg Assign Call If Compare Raise Call If Compare Raise Call If Compare Raise Call With Call Assign Call Assign Call Call Call Call Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "__iadd__", + "source_code": "def __iadd__(self, other):\n m = getmask(other)\n if self._mask is nomask:\n if m is not nomask and m.any():\n self._mask = make_mask_none(self.shape, self.dtype)\n self._mask += m\n elif m is not nomask:\n self._mask += m\n other_data = getdata(other)\n other_data = np.where(self._mask, other_data.dtype.type(0), other_data)\n self._data.__iadd__(other_data)\n return self", + "docstring": "Add other to self in-place.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__iadd__ arg:self arg:other arguments arg arg Assign Call If Compare If BoolOp Compare Call Assign Call If Compare Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "unit_impulse", + "source_code": "def unit_impulse(shape, idx=None, dtype=float):\n out = zeros(shape, dtype)\n shape = np.atleast_1d(shape)\n if idx is None:\n idx = (0,) * len(shape)\n elif idx == 'mid':\n idx = tuple(shape // 2)\n elif not hasattr(idx, '__iter__'):\n idx = (idx,) * len(shape)\n out[idx] = 1\n return out", + "docstring": "Unit impulse signal (discrete delta function) or unit basis vector. Parameters ---------- shape : int or tuple of int Number of samples in the output (1-D), or a tuple that represents the shape of the output (N-D). idx : None or int or tuple of int or 'mid', optional Index at which the value is 1. If None, defaults to the 0th element. 
If `idxu_k[n]k\\\\delta[n]\\\\delta[n-2]`): >>> signal.unit_impulse(7, 2) array([ 0., 0., 1., 0., 0., 0., 0.]) 2-dimensional impulse, centered: >>> signal.unit_impulse((3, 3), 'mid') array([[ 0., 0., 0.], [ 0., 1., 0.], [ 0., 0., 0.]]) Impulse at (2, 2), using broadcasting: >>> signal.unit_impulse((4, 4), 2) array([[ 0., 0., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 1., 0.], [ 0., 0., 0., 0.]]) Plot the impulse response of a 4th-order Butterworth lowpass filter: >>> imp = signal.unit_impulse(100, 'mid') >>> b, a = signal.butter(4, 0.2) >>> response = signal.lfilter(b, a, imp) >>> import numpy as np >>> import matplotlib.pyplot as plt >>> plt.plot(np.arange(-50, 50), imp) >>> plt.plot(np.arange(-50, 50), response) >>> plt.margins(0.1, 0.1) >>> plt.xlabel('Time [samples]') >>> plt.ylabel('Amplitude') >>> plt.grid(True) >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_waveforms.py", + "ast_data": "FunctionDef name:unit_impulse arg:shape arg:idx arg:dtype arguments arg arg arg Assign Call Assign Call If Compare Assign Call If Compare Assign Call If Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "proxy_call", + "source_code": "def proxy_call(self, fn, args, output_metadata):\n flat_args, _ = pytree.tree_flatten(args)\n proxy_args = pytree.tree_map(lambda e: self.to_proxy(e), args)\n proxy_out = self.fx_tracer.create_proxy('call_function', fn, args=proxy_args, kwargs={})\n result = [self.allocate_dummy() for _ in output_metadata]\n self.bind_objects_to_proxies(result, [proxy_out[i] for i in range(len(result))])\n return result", + "docstring": "Proxies a call to fn(*args) into the graph", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py", + "ast_data": "FunctionDef name:proxy_call arg:self arg:fn arg:args arg:output_metadata arguments arg arg arg arg Assign Call Assign Call arguments arg Call Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "unpad", + "source_code": "def unpad(self, padding_size: Tensor) -> 'Keypoints3D':\n raise NotImplementedError", + "docstring": "Pad a bounding keypoints. Args: padding_size: (B, 6)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\keypoints.py", + "ast_data": "FunctionDef name:unpad arg:self arg:padding_size arguments arg arg Raise" + }, + { + "library": "tensorflow", + "name": "tensorflow_version", + "source_code": "def tensorflow_version(self):\n return self._reader.tensorflow_version()", + "docstring": "TensorFlow version used in the debugged TensorFlow program. Note: this is not necessarily the same as the version of TensorFlow used to load the DebugEvent file set. 
Returns: TensorFlow version used by the debugged program, as a .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:tensorflow_version arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "PerReplicaSpec", + "source_code": "class PerReplicaSpec(type_spec.TypeSpec):\n __slots__ = ['_value_specs']\n value_type = property(lambda self: PerReplica)\n\n def __init__(self, *value_specs):\n self._value_specs = tuple(value_specs)\n\n def _serialize(self):\n return self._value_specs\n\n @property\n def _component_specs(self):\n return self._value_specs\n\n def _to_components(self, value):\n replica_context = distribute_lib.get_replica_context()\n if replica_context is not None and replica_context.num_replicas_in_sync > 1:\n raise ValueError('Flattening a PerReplica to components is not supported in replica context.')\n return value._values\n\n def _from_components(self, tensor_list):\n return PerReplica(tensor_list)", + "docstring": "Type specification for a .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "ClassDef name:PerReplicaSpec Assign Assign Call arguments arg FunctionDef name:__init__ arg:self arguments arg arg Assign Call FunctionDef name:_serialize arg:self arguments arg Return return:yes FunctionDef name:_component_specs arg:self arguments arg Return return:yes FunctionDef name:_to_components arg:self arg:value arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Return return:yes FunctionDef name:_from_components arg:self arg:tensor_list arguments arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "SphinxWarningLogRecord", + "source_code": "class SphinxWarningLogRecord(SphinxLogRecord):\n\n @property\n def prefix(self) -> str:\n if self.levelno >= logging.CRITICAL:\n return 'CRITICAL: '\n elif self.levelno >= logging.ERROR:\n return 'ERROR: '\n else:\n return 'WARNING: '", + "docstring": "Warning log record class supporting location", + "type": "class", + "file_path": "sphinx\\sphinx\\util\\logging.py", + "ast_data": "ClassDef name:SphinxWarningLogRecord FunctionDef name:prefix arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "__call__", + "source_code": "@abstractmethod\ndef __call__(self, X, Y=None, eval_gradient=False):\n pass", + "docstring": "Evaluate the kernel.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg" + }, + { + "library": "pytorch", + "name": "is_torchdynamo_compiling", + "source_code": "def is_torchdynamo_compiling():\n return False", + "docstring": "Can't import torchdynamo in torchdeploy builds currently.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py", + "ast_data": "FunctionDef name:is_torchdynamo_compiling arguments Return return:yes" + }, + { + "library": "scipy", + "name": "leaves_list", + "source_code": "@lazy_cython\ndef leaves_list(Z):\n xp = array_namespace(Z)\n Z = _asarray(Z, order='C', xp=xp)\n _is_valid_linkage(Z, throw=True, name='Z', xp=xp)\n\n def cy_leaves_list(Z, validate):\n if validate:\n _is_valid_linkage(Z, throw=True, name='Z', xp=np)\n n = Z.shape[0] + 1\n ML = np.zeros((n,), dtype=np.int32)\n _hierarchy.prelist(Z, ML, n)\n return 
ML\n n = Z.shape[0] + 1\n return xpx.lazy_apply(cy_leaves_list, Z, validate=is_lazy_array(Z), shape=(n,), dtype=xp.int32, as_numpy=True, xp=xp)", + "docstring": "Return a list of leaf node ids. The return corresponds to the observation vector index as it appears in the tree from left to right. Z is a linkage matrix. Parameters ---------- Z : ndarray The hierarchical clustering encoded as a matrix. is a linkage matrix. See for more information. Returns ------- leaves_list : ndarray The list of leaf node ids. See Also -------- dendrogram : for information about dendrogram structure. Examples -------- >>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list >>> from scipy.spatial.distance import pdist >>> from matplotlib import pyplot as plt >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] >>> Z = ward(pdist(X)) The linkage matrix `scipy.cluster.hierarchy.leaves_list` dataset and leaves in the dendrogram: >>> leaves_list(Z) array([ 2, 0, 1, 5, 3, 4, 8, 6, 7, 11, 9, 10], dtype=int32) >>> fig = plt.figure(figsize=(25, 10)) >>> dn = dendrogram(Z) >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:leaves_list arg:Z arguments arg Assign Call Assign Call Call FunctionDef name:cy_leaves_list arg:Z arg:validate arguments arg arg If Call Assign Assign Call Call Return return:yes Assign Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "add_state", + "source_code": "def add_state(self, state):\n self._validate_state(state)\n self._state.add(state)", + "docstring": "Add a state to define the widget's behavior. See the parameters for details. Parameters ---------- state : str Must be a supported state of the selector. See the parameters for details. Raises ------ ValueError When the state is not supported by the selector.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:add_state arg:self arg:state arguments arg arg Call Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, rgbs: Tensor, densities: Tensor, points_3d: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(rgbs, ['*', 'N', '3'])\n KORNIA_CHECK_SHAPE(densities, ['*', 'N'])\n KORNIA_CHECK_SHAPE(points_3d, ['*', 'N', '3'])\n num_ray_points: int = points_3d.shape[-2]\n points_3d = points_3d.reshape(-1, num_ray_points, 3)\n delta_3d = points_3d[0, 1, :] - points_3d[0, 0, :]\n delta = torch.linalg.norm(delta_3d, dim=-1)\n alpha = 1 - torch.exp(-1.0 * densities * delta)\n return self._render(alpha, rgbs)", + "docstring": "Render 3D regularly sampled points along rays. 
Args: rgbs: RGB values of points along rays :math: densities: Volume densities of points along rays :math: points_3d: 3D points along rays :math: Returns: Rendered RGB values for each ray :math:", + "type": "method", + "file_path": "kornia\\kornia\\nerf\\volume_renderer.py", + "ast_data": "FunctionDef name:forward arg:self arg:rgbs arg:densities arg:points_3d arguments arg arg arg arg Call Call Call Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "linear_refinement_rule", + "source_code": "@register_refinement_rule(torch.nn.Linear)\ndef linear_refinement_rule(n: Node):\n res = []\n assert isinstance(n.args[0], Node)\n arg_type = n.args[0].type\n if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):\n res = [Equality(arg_type.__args__[0], n.type.__args__[0])]\n return res", + "docstring": "The equality constraints are between the first dimension of the input and output", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", + "ast_data": "FunctionDef name:linear_refinement_rule arg:n arguments arg Assign Call Assign If BoolOp Call Call Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "keypoints_to_grid", + "source_code": "def keypoints_to_grid(keypoints: Tensor, img_size: Tuple[int, int]) -> Tensor:\n KORNIA_CHECK_SHAPE(keypoints, ['N', '2'])\n n_points = len(keypoints)\n grid_points = normalize_pixel_coordinates(keypoints[:, [1, 0]], img_size[0], img_size[1])\n grid_points = grid_points.view(-1, n_points, 1, 2)\n return grid_points", + "docstring": "Convert a list of keypoints into a grid in [-1, 1]² that can be used in torch.nn.functional.interpolate. Args: keypoints: a tensor [N, 2] of N keypoints (ij coordinates convention). img_size: the original image size (H, W)", + "type": "function", + "file_path": "kornia\\kornia\\feature\\sold2\\sold2.py", + "ast_data": "FunctionDef name:keypoints_to_grid arg:keypoints arg:img_size arguments arg arg Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "Rescale", + "source_code": "class Rescale(Module):\n\n def __init__(self, factor: Union[float, Tensor]) -> None:\n super().__init__()\n if isinstance(factor, float):\n self.factor = tensor(factor)\n else:\n if not isinstance(factor, Tensor) or factor.ndim != 0:\n raise TypeError(f'Expected factor to be a float or a 0-d tensor, got {factor}.')\n self.factor = factor\n\n def forward(self, input: Tensor) -> Tensor:\n return input * self.factor", + "docstring": "Initialize the Rescale operator. Args: factor: The scaling factor. Could be a float or a 0-d tensor.", + "type": "class", + "file_path": "kornia\\kornia\\enhance\\rescale.py", + "ast_data": "ClassDef name:Rescale FunctionDef name:__init__ arg:self arg:factor arguments arg arg Call Call If Call Assign Call If BoolOp Call Compare Raise Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "set_up_storage_reader", + "source_code": "@abc.abstractmethod\ndef set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:\n pass", + "docstring": "Initialize this instance. Args: metadata (Metadata): The metadata schema to use. 
is_coordinator (bool): Whether this instance is responsible for coordinating the checkpoint.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py", + "ast_data": "FunctionDef name:set_up_storage_reader arg:self arg:metadata arg:is_coordinator arguments arg arg arg" + }, + { + "library": "pandas", + "name": "to_2d_mgr", + "source_code": "def to_2d_mgr(self, columns: Index) -> BlockManager:\n blk = self.blocks[0]\n arr = ensure_block_shape(blk.values, ndim=2)\n bp = BlockPlacement(0)\n new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs)\n axes = [columns, self.axes[0]]\n return BlockManager([new_blk], axes=axes, verify_integrity=False)", + "docstring": "Manager analogue of Series.to_frame", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:to_2d_mgr arg:self arg:columns arguments arg arg Assign Assign Call Assign Call Assign Call Call Assign Return return:yes Call" + }, + { + "library": "cryptography", + "name": "generate_private_key", + "source_code": "@abc.abstractmethod\ndef generate_private_key(self) -> DSAPrivateKey:\n pass", + "docstring": "Generates and returns a DSAPrivateKey.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py", + "ast_data": "FunctionDef name:generate_private_key arg:self arguments arg" + }, + { + "library": "django", + "name": "h", + "source_code": "def h(self):\n return '%02d' % (self.data.hour % 12 or 12)", + "docstring": "Hour, 12-hour format; i.e. '01' to '12'", + "type": "method", + "file_path": "django\\django\\utils\\dateformat.py", + "ast_data": "FunctionDef name:h arg:self arguments arg Return return:yes BoolOp" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, laf: torch.Tensor, img: torch.Tensor) -> torch.Tensor:\n KORNIA_CHECK_LAF(laf)\n KORNIA_CHECK_SHAPE(img, ['B', '1', 'H', 'W'])\n B, N = laf.shape[:2]\n PS: int = self.patch_size\n patches: torch.Tensor = extract_patches_from_pyramid(img, make_upright(laf), PS, True).view(-1, 1, PS, PS)\n ellipse_shape: torch.Tensor = self.affine_shape_detector(patches)\n ellipses = torch.cat([laf.view(-1, 2, 3)[..., 2].unsqueeze(1), ellipse_shape], dim=2).view(B, N, 5)\n scale_orig = get_laf_scale(laf)\n if self.preserve_orientation:\n ori_orig = get_laf_orientation(laf)\n laf_out = ellipse_to_laf(ellipses)\n ellipse_scale = get_laf_scale(laf_out)\n laf_out = scale_laf(laf_out, scale_orig / ellipse_scale)\n if self.preserve_orientation:\n laf_out = set_laf_orientation(laf_out, ori_orig)\n return laf_out", + "docstring": "Run forward. 
Args: laf: :math: img: :math: Returns: LAF_out: :math:", + "type": "method", + "file_path": "kornia\\kornia\\feature\\affine_shape.py", + "ast_data": "FunctionDef name:forward arg:self arg:laf arg:img arguments arg arg arg Call Call Assign Call Call Call Call Assign Call Call Call Call Assign Call If Assign Call Assign Call Assign Call Assign Call If Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_config", + "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n _check_config_keys(config, cls._fields)\n kwargs = _standardize_and_copy_config(config)\n return cls(**kwargs)", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_comm_counts", + "source_code": "def get_comm_counts(self) -> dict[Any, int]:\n return self.comm_counts", + "docstring": "Returns the communication counts as a dictionary. Returns: Dict[Any, int]: The communication counts as a dictionary.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py", + "ast_data": "FunctionDef name:get_comm_counts arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, graph, run_metadata):\n self._graph = graph\n self._run_metadata = run_metadata\n self._string_table = StringTable()\n self._functions = Functions(self._string_table)\n self._locations = Locations(self._functions)", + "docstring": "Constructor. Args: graph: A instance. run_metadata: A list of objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:graph arg:run_metadata arguments arg arg arg Assign Assign Assign Call Assign Call Assign Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, size=1, facecolor=None):\n facecolor = mpl._val_or_rc(facecolor, 'axes.edgecolor')\n self.size = size\n self._facecolor = facecolor\n super().__init__(size=size)", + "docstring": "Parameters ---------- size : float Size of the arrow as a fraction of the ticklabel size. facecolor : :mpltype:, default: :rc: Fill color. .. versionadded:: 3.7", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axisline_style.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:size arg:facecolor arguments arg arg arg Assign Call Assign Assign Call Call" + }, + { + "library": "django", + "name": "attach_file", + "source_code": "def attach_file(self, path, mimetype=None):\n path = Path(path)\n with path.open('rb') as file:\n content = file.read()\n self.attach(path.name, content, mimetype)", + "docstring": "Attach a file from the filesystem. Set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE if it isn't specified and cannot be guessed. For a text/* mimetype (guessed or specified), decode the file's content as UTF-8. 
If that fails, set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.", + "type": "method", + "file_path": "django\\django\\core\\mail\\message.py", + "ast_data": "FunctionDef name:attach_file arg:self arg:path arg:mimetype arguments arg arg arg Assign Call With Call Assign Call Call" + }, + { + "library": "django", + "name": "info", + "source_code": "@property\ndef info(self):\n return capi.get_ds_info(self.ptr, None).decode()", + "docstring": "Return information about this raster in a string format equivalent to the output of the gdalinfo command line utility.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py", + "ast_data": "FunctionDef name:info arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "response_headers", + "source_code": "def response_headers(headers=None, debug=False):\n if debug:\n cherrypy.log('Setting response headers: %s' % repr(headers), 'TOOLS.RESPONSE_HEADERS')\n for name, value in headers or []:\n cherrypy.serving.response.headers[name] = value", + "docstring": "Set headers on the response.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", + "ast_data": "FunctionDef name:response_headers arg:headers arg:debug arguments arg arg If Call Call For BoolOp Assign" + }, + { + "library": "matplotlib", + "name": "get_filterrad", + "source_code": "def get_filterrad(self):\n return self._filterrad", + "docstring": "Return the filterrad setting.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:get_filterrad arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "gradient_hessian", + "source_code": "def gradient_hessian(self, y_true, raw_prediction, sample_weight=None, gradient_out=None, hessian_out=None, n_threads=1):\n if gradient_out is None:\n if hessian_out is None:\n gradient_out = np.empty_like(raw_prediction)\n hessian_out = np.empty_like(raw_prediction)\n else:\n gradient_out = np.empty_like(hessian_out)\n elif hessian_out is None:\n hessian_out = np.empty_like(gradient_out)\n if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:\n raw_prediction = raw_prediction.squeeze(1)\n if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:\n gradient_out = gradient_out.squeeze(1)\n if hessian_out.ndim == 2 and hessian_out.shape[1] == 1:\n hessian_out = hessian_out.squeeze(1)\n self.closs.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight, gradient_out=gradient_out, hessian_out=hessian_out, n_threads=n_threads)\n return (gradient_out, hessian_out)", + "docstring": "Compute gradient and hessian of loss w.r.t raw_prediction. Parameters ---------- y_true : C-contiguous array of shape (n_samples,) Observed, true target values. raw_prediction : C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) Raw prediction values (in link space). sample_weight : None or C-contiguous array of shape (n_samples,) Sample weights. gradient_out : None or C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) A location into which the gradient is stored. If None, a new array might be created. hessian_out : None or C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) A location into which the hessian is stored. If None, a new array might be created. n_threads : int, default=1 Might use openmp thread parallelism. 
Returns ------- gradient : arrays of shape (n_samples,) or (n_samples, n_classes) Element-wise gradients. hessian : arrays of shape (n_samples,) or (n_samples, n_classes) Element-wise hessians.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "FunctionDef name:gradient_hessian arg:self arg:y_true arg:raw_prediction arg:sample_weight arg:gradient_out arg:hessian_out arg:n_threads arguments arg arg arg arg arg arg arg If Compare If Compare Assign Call Assign Call Assign Call If Compare Assign Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_get_data_as_items", + "source_code": "def _get_data_as_items(self) -> list[tuple[str, int]]:\n rng = self._range\n return [('start', rng.start), ('stop', rng.stop), ('step', rng.step)]", + "docstring": "return a list of tuples of start, stop, step", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\range.py", + "ast_data": "FunctionDef name:_get_data_as_items arg:self arguments arg Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n if y is None:\n X = validate_data(self, X, accept_sparse=['csr', 'csc'])\n else:\n X, y = validate_data(self, X, y, accept_sparse=['csr', 'csc'], multi_output=True)\n self._check_params(X, y)\n score_func_ret = self.score_func(X, y)\n if isinstance(score_func_ret, (list, tuple)):\n self.scores_, self.pvalues_ = score_func_ret\n self.pvalues_ = np.asarray(self.pvalues_)\n else:\n self.scores_ = score_func_ret\n self.pvalues_ = None\n self.scores_ = np.asarray(self.scores_)\n return self", + "docstring": "Run score function on (X, y) and get the appropriate features. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) or None The target values (class labels in classification, real numbers in regression). If the selector is unsupervised then can be set to . Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_univariate_selection.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg If Compare Assign Call Assign Call Call Assign Call If Call Assign Assign Call Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_find_localzeros", + "source_code": "@classmethod\ndef _find_localzeros(cls, values, **options):\n other_values = set()\n num_value = None\n for arg in values:\n if arg.is_Number:\n if num_value is None:\n num_value = arg\n elif cls is Max:\n num_value = max(num_value, arg)\n elif cls is Min:\n num_value = min(num_value, arg)\n else:\n raise AssertionError(f'impossible {cls}')\n else:\n other_values.add(arg)\n if num_value is None:\n return other_values\n if len(other_values) == 0:\n return {num_value}\n if len(other_values) == 1:\n other_value = next(iter(other_values))\n if num_value in (0.0, 0) and other_value.is_nonnegative:\n return other_values if cls is Max else {num_value}\n if num_value == 1 and other_value.is_positive:\n return other_values if cls is Max else {num_value}\n other_values.add(num_value)\n return other_values", + "docstring": "Sequentially allocate values to localzeros. 
When a value is identified as being more extreme than another member it replaces that member; if this is never true, then the value is simply appended to the localzeros. Unlike the sympy implementation, we only look for zero and one, we don't do generic is connected test pairwise which is slow", + "type": "method", + "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py", + "ast_data": "FunctionDef name:_find_localzeros arg:cls arg:values arguments arg arg arg Assign Call Assign For If If Compare Assign If Compare Assign Call If Compare Assign Call Raise Call Call If Compare Return return:yes If Compare Call Return return:yes If Compare Call Assign Call Call If BoolOp Compare Return return:yes Compare If BoolOp Compare Return return:yes Compare Call Return return:yes" + }, + { + "library": "django", + "name": "find", + "source_code": "def find(self, path, find_all=False, **kwargs):\n raise NotImplementedError('subclasses of BaseFinder must provide a find() method')", + "docstring": "Given a relative file path, find an absolute file path. If the `` parameter is False (default) return only the first found file path; if True, return a list of all found files paths.", + "type": "method", + "file_path": "django\\django\\contrib\\staticfiles\\finders.py", + "ast_data": "FunctionDef name:find arg:self arg:path arg:find_all arguments arg arg arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "set_annotation_clip", + "source_code": "def set_annotation_clip(self, b):\n self._annotation_clip = b", + "docstring": "Set the annotation's clipping behavior. Parameters ---------- b : bool or None - True: The annotation will be clipped when ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:set_annotation_clip arg:self arg:b arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "invoke", + "source_code": "def invoke(self):\n self._ensure_safe()\n self._interpreter.Invoke()", + "docstring": "Invoke the interpreter. Be sure to set the input sizes, allocate tensors and fill values before calling this. Also, note that this function releases the GIL so heavy computation can be done in the background while the Python interpreter continues. No other function on this object should be called while the invoke() call has not finished. Raises: ValueError: When the underlying interpreter fails raise ValueError.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", + "ast_data": "FunctionDef name:invoke arg:self arguments arg Call Call" + }, + { + "library": "tensorflow", + "name": "opcode_to_name", + "source_code": "def opcode_to_name(model, op_code):\n op = model.operatorCodes[op_code]\n code = max(op.builtinCode, op.deprecatedBuiltinCode)\n for name, value in vars(schema_fb.BuiltinOperator).items():\n if value == code:\n return name\n return None", + "docstring": "Converts a TFLite op_code to the human readable name. Args: model: The input tflite model. op_code: The op_code to resolve to a readable name. 
Returns: A string containing the human readable op name, or None if not resolvable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py", + "ast_data": "FunctionDef name:opcode_to_name arg:model arg:op_code arguments arg arg Assign Assign Call For Call Call If Compare Return return:yes Return return:no" + }, + { + "library": "pytorch", + "name": "PolynomialLR", + "source_code": "class PolynomialLR(LRScheduler):\n\n def __init__(self, optimizer: Optimizer, total_iters: int=5, power: float=1.0, last_epoch: int=-1) -> None:\n self.total_iters = total_iters\n self.power = power\n super().__init__(optimizer, last_epoch)\n\n @override\n def get_lr(self) -> list[float]:\n _warn_get_lr_called_within_step(self)\n if self._is_initial or self.last_epoch > self.total_iters:\n return [group['lr'] for group in self.optimizer.param_groups]\n decay_factor = ((1.0 - self.last_epoch / self.total_iters) / (1.0 - (self.last_epoch - 1) / self.total_iters)) ** self.power\n return [group['lr'] * decay_factor for group in self.optimizer.param_groups]\n\n def _get_closed_form_lr(self):\n return [base_lr * (1.0 - min(self.total_iters, self.last_epoch) / self.total_iters) ** self.power for base_lr in self.base_lrs]", + "docstring": "Decays the learning rate of each parameter group using a polynomial function in the given total_iters. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. total_iters (int): The number of steps that the scheduler decays the learning rate. Default: 5. power (float): The power of the polynomial. Default: 1.0. Example: >>> # xdoctest: +SKIP(\"undefined vars\") >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.0490 if epoch == 0 >>> # lr = 0.0481 if epoch == 1 >>> # lr = 0.0472 if epoch == 2 >>> # ... >>> # lr = 0.0 if epoch >= 50 >>> scheduler = PolynomialLR(optimizer, total_iters=50, power=0.9) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() .. 
image:: ../scripts/lr_scheduler_images/PolynomialLR.png", + "type": "class", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "ClassDef name:PolynomialLR FunctionDef name:__init__ arg:self arg:optimizer arg:total_iters arg:power arg:last_epoch arguments arg arg arg arg arg Assign Assign Call Call FunctionDef name:get_lr arg:self arguments arg Call If BoolOp Compare Return return:yes Assign Return return:yes FunctionDef name:_get_closed_form_lr arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "Transform", + "source_code": "class Transform(RegisterLookupMixin, Func):\n bilateral = False\n arity = 1\n\n @property\n def lhs(self):\n return self.get_source_expressions()[0]\n\n def get_bilateral_transforms(self):\n if hasattr(self.lhs, 'get_bilateral_transforms'):\n bilateral_transforms = self.lhs.get_bilateral_transforms()\n else:\n bilateral_transforms = []\n if self.bilateral:\n bilateral_transforms.append(self.__class__)\n return bilateral_transforms", + "docstring": "RegisterLookupMixin() is first so that get_lookup() and get_transform() first examine self and then check output_field.", + "type": "class", + "file_path": "django\\django\\db\\models\\lookups.py", + "ast_data": "ClassDef name:Transform Assign Assign FunctionDef name:lhs arg:self arguments arg Return return:yes Call FunctionDef name:get_bilateral_transforms arg:self arguments arg If Call Assign Call Assign If Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "not_eq", + "source_code": "def not_eq(a, b):\n return not_(eq(a, b))", + "docstring": "Functional form of \"not-equal\".", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py", + "ast_data": "FunctionDef name:not_eq arg:a arg:b arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_shared_name", + "source_code": "@property\ndef _shared_name(self):\n return self.name[:self.name.index(':')]", + "docstring": "The shared name of the variable. Unlike name(), shared_name doesn't have \":0\" suffix. It is user-specified name with name scope prefix. Returns: variable name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:_shared_name arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_reduce_fake_tensor", + "source_code": "def _reduce_fake_tensor(self, t: Tensor) -> tuple[Callable[[T], T], tuple[TensorMetadata]]:\n metadata = extract_tensor_metadata_for_cache_key(t)\n return (_ident, (metadata,))", + "docstring": "Custom reducer to pickle FakeTensors.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codecache.py", + "ast_data": "FunctionDef name:_reduce_fake_tensor arg:self arg:t arguments arg arg Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "LossWrapper", + "source_code": "class LossWrapper(torch.nn.Module):\n\n def __init__(self, module, loss_fn):\n super().__init__()\n self.module = module\n self.loss_fn = loss_fn\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError('This instance of LossWrapper does not have an overriddenforward(). 
Please implement forward() to specify the arguments, connection between the module and loss, and loss output value.')", + "docstring": "LossWrapper is a convenient abstract class that allows you to wrap up both your model as well as its loss function and specify the connectivity between the inputs, model, loss function, and output value. Example:: class MyModelWrapper(LossWrapper): def forward(self, x, targets): model_out = self.module(x) loss_value = self.loss_fn(model_out, targets) return loss_value The above example defines a connectivity where we expect the forward/loss/backward training procedure to take two arguments (x and targets), pass x into the module to get the output of the feedforward computation, pass the model output and the targets value into the loss function, and get and return the loss value, which will be backpropagated by PiPPy. The above class would then be instantiated like:: model = ... # instantiate the model loss_fn = torch.nn.MSELoss() # for the sake of demonstration wrapper = MyModelWrapper(model, loss_fn) pipe = Pipe.from_tracing(wrapper, ...)", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py", + "ast_data": "ClassDef name:LossWrapper FunctionDef name:__init__ arg:self arg:module arg:loss_fn arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arguments arg arg arg Raise Call" + }, + { + "library": "scipy", + "name": "yule", + "source_code": "def yule(u, v, w=None):\n u = _validate_vector(u)\n v = _validate_vector(v)\n if w is not None:\n w = _validate_weights(w)\n nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)\n half_R = ntf * nft\n if half_R == 0:\n return 0.0\n else:\n return float(2.0 * half_R / (ntt * nff + half_R))", + "docstring": "Compute the Yule dissimilarity between two boolean 1-D arrays. The Yule dissimilarity is defined as .. 
math:: \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}} where :math: is the number of occurrences of :math: and :math: for :math:`k >> from scipy.spatial import distance >>> distance.yule([1, 0, 0], [0, 1, 0]) 2.0 >>> distance.yule([1, 1, 0], [0, 1, 0]) 0.0", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\distance.py", + "ast_data": "FunctionDef name:yule arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call If Compare Assign Call Assign Call Assign If Compare Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "load_state_dict", + "source_code": "@override\ndef load_state_dict(self, state_dict: dict[str, Any]) -> None:\n self.__dict__.update(state_dict)\n self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode)", + "docstring": "Load the scheduler's state.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Call Call" + }, + { + "library": "pytorch", + "name": "see", + "source_code": "def see(self, *key):\n if key in self.seen:\n raise RuntimeError('duplicate key: ' + str(key))\n self.seen.add(key)", + "docstring": "Observe a key and raise an error if it is seen multiple times.", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\profiler.py", + "ast_data": "FunctionDef name:see arg:self arguments arg arg If Compare Raise Call Call Call" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "def predict_proba(self, X):\n check_is_fitted(self)\n mean_proba = np.zeros((_num_samples(X), len(self.classes_)))\n for calibrated_classifier in self.calibrated_classifiers_:\n proba = calibrated_classifier.predict_proba(X)\n mean_proba += proba\n mean_proba /= len(self.calibrated_classifiers_)\n return mean_proba", + "docstring": "Calibrated probabilities of classification. This function returns calibrated probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples, as accepted by . Returns ------- C : ndarray of shape (n_samples, n_classes) The predicted probas.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\calibration.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Call Call For Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "parse_segments", + "source_code": "def parse_segments(raw_segments: list[list[int]]) -> list[LlvmCoverageSegment]:\n ret: list[LlvmCoverageSegment] = []\n for raw_segment in raw_segments:\n assert len(raw_segment) == 5 or len(raw_segment) == 6, 'list is not compatible with llvmcom export:'\n ' Expected to have 5 or 6 elements'\n if len(raw_segment) == 5:\n ret.append(LlvmCoverageSegment(raw_segment[0], raw_segment[1], raw_segment[2], raw_segment[3], raw_segment[4], None))\n else:\n ret.append(LlvmCoverageSegment(*raw_segment))\n return ret", + "docstring": "Creates LlvmCoverageSegment from a list of lists in llvm export json. 
each segment is represented by 5-element array.", + "type": "function", + "file_path": "pytorch\\tools\\code_coverage\\package\\tool\\parser\\llvm_coverage_segment.py", + "ast_data": "FunctionDef name:parse_segments arg:raw_segments arguments arg For BoolOp Compare Call Compare Call If Compare Call Call Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_minor_number", + "source_code": "def set_minor_number(self, minor_number):\n self._minor_number = minor_number", + "docstring": "Set the number of minor ticks to label when some minor ticks are labelled. Parameters ---------- minor_number : int Number of ticks which are labelled when the number of ticks is below the threshold.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:set_minor_number arg:self arg:minor_number arguments arg arg Assign" + }, + { + "library": "pandas", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n parened = (f'({pprint_thing(opr)})' for opr in self.operands)\n return pprint_thing(f' {self.op} '.join(parened))", + "docstring": "Print a generic n-ary operator and its operands using infix notation.", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\ops.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "shear_x", + "source_code": "def shear_x(probability: float, magnitude: int) -> OperationBase:\n magnitudes = linspace(-0.3, 0.3, 11) * 180.0\n return ShearX(None, probability, magnitude_range=(magnitudes[magnitude].item(), magnitudes[magnitude + 1].item()), symmetric_megnitude=False)", + "docstring": "Return ShearX op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py", + "ast_data": "FunctionDef name:shear_x arg:probability arg:magnitude arguments arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "_ensure_has_backend", + "source_code": "def _ensure_has_backend(self):\n dict.setdefault(self, 'backend', rcsetup._auto_backend_sentinel)", + "docstring": "Ensure that a \"backend\" entry exists. Normally, the default matplotlibrc file contains *no* entry for \"backend\" (the corresponding line starts with ##, not #; we fill in _auto_backend_sentinel in that case. 
However, packagers can set a different default backend (resulting in a normal line) in which case we should *not* fill in _auto_backend_sentinel.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", + "ast_data": "FunctionDef name:_ensure_has_backend arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "remove_node", + "source_code": "def remove_node(model: GraphModule, node: Node, prev_node: Node):\n orig_users = list(node.users.keys())\n for user_node in orig_users:\n user_node.replace_input_with(node, prev_node)\n model.graph.erase_node(node)", + "docstring": "Removes the given node from the model by replacing all of its users with the given previous node", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:remove_node arg:model arg:node arg:prev_node arguments arg arg arg Assign Call Call For Call Call" + }, + { + "library": "pytorch", + "name": "load_storage", + "source_code": "def load_storage(self, storage: Storage, offset: int=0) -> None:\n assert self.handle is not None, 'Cannot load data from a file that is not registered.'\n torch._C._gds_load_storage(self.handle, storage, offset)", + "docstring": "Loads data from the file into the storage. This is a wrapper around `` into the storage. Args: storage (Storage): Storage to load data into. offset (int, optional): Offset into the file to start loading from. (Default: 0)", + "type": "method", + "file_path": "pytorch\\torch\\cuda\\gds.py", + "ast_data": "FunctionDef name:load_storage arg:self arg:storage arg:offset arguments arg arg arg Compare Call" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, input, chunkSize=65536):\n self.input = input\n self.chunkSize = chunkSize", + "docstring": "Initialize file_generator with file `` for chunked access.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\__init__.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:input arg:chunkSize arguments arg arg arg Assign Assign" + }, + { + "library": "pytorch", + "name": "dump_chrome_trace", + "source_code": "def dump_chrome_trace(f, input, trace_filename, optimize_ctx, activities, num_runs=1, devices=None, kwargs_for_f=None, kwargs_for_profiler=None):\n if devices is None:\n devices = ['cuda']\n global synchronize\n if devices != ['cpu'] and torch.cuda.is_available():\n synchronize = torch.cuda.synchronize\n if kwargs_for_f is None:\n kwargs_for_f = {}\n if kwargs_for_profiler is None:\n kwargs_for_profiler = {}\n with optimize_ctx:\n torch.manual_seed(1337)\n for _ in range(5):\n f(input, **kwargs_for_f)\n synchronize()\n torch.manual_seed(1337)\n t0 = time.perf_counter()\n for _ in range(num_runs):\n f(input, **kwargs_for_f)\n synchronize()\n t1 = time.perf_counter()\n timing = t1 - t0\n with profile(activities=activities, **kwargs_for_profiler) as prof:\n with optimize_ctx:\n synchronize()\n torch.manual_seed(1337)\n for _ in range(num_runs):\n f(input, **kwargs_for_f)\n synchronize()\n prof.export_chrome_trace(trace_filename)\n return timing", + "docstring": "Output the chrome trace of running f(input, **kwargs_for_f) with [optimize_ctx] [num_runs] times to [trace_filename]. [activities] are the activities that the profiler will record, e.g. ProfilerActivity.CUDA. 
Return total runtime without the profiler Outputs to trace_filename", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\benchmark_utils.py", + "ast_data": "FunctionDef name:dump_chrome_trace arg:f arg:input arg:trace_filename arg:optimize_ctx arg:activities arg:num_runs arg:devices arg:kwargs_for_f arg:kwargs_for_profiler arguments arg arg arg arg arg arg arg arg arg If Compare Assign If BoolOp Compare Call Assign If Compare Assign If Compare Assign With Call For Call Call Call Call Assign Call For Call Call Call Assign Call Assign With Call With Call Call For Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "write_dirpath", + "source_code": "def write_dirpath(dirpath, strategy):\n if strategy is None:\n strategy = distribute_lib.get_strategy()\n if strategy is None:\n return dirpath\n if not strategy.extended._in_multi_worker_mode():\n return dirpath\n if strategy.extended.should_checkpoint:\n return dirpath\n return _get_temp_dir(dirpath, strategy)", + "docstring": "Returns the writing dir that should be used to save file distributedly. would be created if it doesn't exist. Args: dirpath: Original dirpath that would be used without distribution. strategy: The tf.distribute strategy object currently used. Returns: The writing dir path that should be used to save with distribution.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_file_utils.py", + "ast_data": "FunctionDef name:write_dirpath arg:dirpath arg:strategy arguments arg arg If Compare Assign Call If Compare Return return:yes If Call Return return:yes If Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "hard_sigmoid", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef hard_sigmoid(x):\n point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)\n point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)\n x = math_ops.multiply(x, point_two)\n x = math_ops.add(x, point_five)\n x = clip_ops.clip_by_value(x, 0.0, 1.0)\n return x", + "docstring": "Segment-wise linear approximation of sigmoid. Faster than sigmoid. Returns if . In , returns . Args: x: A tensor or variable. Returns: A tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:hard_sigmoid arg:x arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "nn_accuracy", + "source_code": "def nn_accuracy(X, X_embedded, k=1):\n knn = NearestNeighbors(n_neighbors=1, n_jobs=-1)\n _, neighbors_X = knn.fit(X).kneighbors()\n _, neighbors_X_embedded = knn.fit(X_embedded).kneighbors()\n return np.mean(neighbors_X == neighbors_X_embedded)", + "docstring": "Accuracy of the first nearest neighbor", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_tsne_mnist.py", + "ast_data": "FunctionDef name:nn_accuracy arg:X arg:X_embedded arg:k arguments arg arg arg Assign Call Assign Call Call Assign Call Call Return return:yes Call Compare" + }, + { + "library": "tensorflow", + "name": "encoding_specs", + "source_code": "def encoding_specs(self, spec):\n return spec._component_specs", + "docstring": "Returns a list of (s) describing the encoding for . See for a description of the default encoding. Subclasses may override this default definition, when necessary. Args: spec: The TypeSpec whose encoding should be described. 
Returns: A nest (as defined by `tf.nest`) of `tf.TypeSpec`, describing the values that are returned by `self.encode(spec, ...)`. All TypeSpecs in this nest must be batchable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", + "ast_data": "FunctionDef name:encoding_specs arg:self arg:spec arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_function_scope_options", + "source_code": "def _function_scope_options(self, fn_scope):\n if fn_scope.level == 2:\n return self.ctx.user.options\n return self.ctx.user.options.call_options()", + "docstring": "Returns the options with which to create function scopes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\functions.py", + "ast_data": "FunctionDef name:_function_scope_options arg:self arg:fn_scope arguments arg arg If Compare Return return:yes Return return:yes Call" + }, + { + "library": "kornia", + "name": "visualize", + "source_code": "def visualize(self, images: Union[Tensor, list[Tensor]], edge_maps: Optional[Union[Tensor, list[Tensor]]]=None, output_type: str='torch') -> Union[Tensor, list[Tensor], list[Image.Image]]:\n if edge_maps is None:\n edge_maps = self.forward(images)\n output = []\n for edge_map in edge_maps:\n output.append(grayscale_to_rgb(edge_map)[0])\n return self._tensor_to_type(output, output_type, is_batch=isinstance(images, Tensor))", + "docstring": "Draw the edge detection results. Args: images: input tensor. edge_maps: detected edges. output_type: type of the output. Returns: output tensor.", + "type": "method", + "file_path": "kornia\\kornia\\models\\edge_detection\\base.py", + "ast_data": "FunctionDef name:visualize arg:self arg:images arg:edge_maps arg:output_type arguments arg arg arg arg If Compare Assign Call Assign For Call Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "is_view", + "source_code": "@property\ndef is_view(self) -> bool:\n if len(self.blocks) == 1:\n return self.blocks[0].is_view\n return False", + "docstring": "return a boolean if we are a single block and are a view", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:is_view arg:self arguments arg If Compare Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "_check_xy", + "source_code": "def _check_xy(self, renderer=None):\n if renderer is None:\n renderer = self.get_figure(root=True)._get_renderer()\n b = self.get_annotation_clip()\n if b or (b is None and self.xycoords == 'data'):\n xy_pixel = self._get_position_xy(renderer)\n return self.axes.contains_point(xy_pixel)\n return True", + "docstring": "Check whether the annotation at *xy_pixel* should be drawn.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:_check_xy arg:self arg:renderer arguments arg arg If Compare Assign Call Call Assign Call If BoolOp BoolOp Compare Compare Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "staged_predict", + "source_code": "def staged_predict(self, X):\n X = self._check_X(X)\n n_classes = self.n_classes_\n classes = self.classes_\n if n_classes == 2:\n for pred in self.staged_decision_function(X):\n yield np.array(classes.take(pred > 0, axis=0))\n else:\n for pred in self.staged_decision_function(X):\n yield np.array(classes.take(np.argmax(pred, axis=1), axis=0))", + "docstring": "Return staged predictions for X. 
The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. This generator method yields the ensemble prediction after each iteration of boosting and therefore allows monitoring, such as to determine the prediction on a test set after each boost. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Yields ------ y : generator of ndarray of shape (n_samples,) The predicted classes.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py", + "ast_data": "FunctionDef name:staged_predict arg:self arg:X arguments arg arg Assign Call Assign Assign If Compare For Call Call Call Compare For Call Call Call Call" + }, + { + "library": "pytorch", + "name": "get_prologue_template_epilogue", + "source_code": "@staticmethod\ndef get_prologue_template_epilogue(nodes: list[BaseSchedulerNode]) -> tuple[list[BaseSchedulerNode], BaseSchedulerNode, list[BaseSchedulerNode]]:\n template_index = next((i for i, n in enumerate(nodes) if n.is_template()))\n prologue = nodes[:template_index]\n template_node = nodes[template_index]\n epilogue = nodes[template_index + 1:]\n return (prologue, template_node, epilogue)", + "docstring": "For the list of nodes, get the prologue, template, and epilogue", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:get_prologue_template_epilogue arg:nodes arguments arg Assign Call Call Call Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_SparseTensorSliceDataset", + "source_code": "class _SparseTensorSliceDataset(dataset_ops.DatasetSource):\n\n def __init__(self, sparse_tensor):\n if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):\n raise TypeError(f'Invalid `sparse_tensor`. `sparse_tensor` must be a `tf.sparse.SparseTensor`. 
Got {type(sparse_tensor)}.')\n self._sparse_tensor = sparse_tensor\n indices_shape = self._sparse_tensor.indices.get_shape()\n shape_shape = self._sparse_tensor.dense_shape.get_shape()\n rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)\n self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64), tensor_spec.TensorSpec([None], self._sparse_tensor.dtype), tensor_spec.TensorSpec([rank], dtypes.int64))\n variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(self._sparse_tensor.indices, self._sparse_tensor.values, self._sparse_tensor.dense_shape)\n super().__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure", + "docstring": "A that splits a rank-N into its rows.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_sparse_tensor_slices_op.py", + "ast_data": "ClassDef name:_SparseTensorSliceDataset FunctionDef name:__init__ arg:self arg:sparse_tensor arguments arg arg If Call Raise Call Call Assign Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_register_foreach_lowering", + "source_code": "def _register_foreach_lowering(aten_fn, decomp_fn):\n\n @functools.wraps(decomp_fn)\n def wrapped(*args, **kwargs):\n assert len(args) <= 2\n out = decomp_fn(*args, **kwargs)\n validate_ir(out)\n return out\n aten_fns = get_overloads(aten_fn)\n foreach_ops.update(aten_fns)\n lowerings.update(dict.fromkeys(aten_fns, wrapped))\n return wrapped", + "docstring": "Add a foreach lowering to lowerings dict. Arguments: aten_fn: torch.ops.aten.* fn we are lowering decomp_fn: alternate implementation on our IR broadcast: True to apply broadcasting to tensor inputs type_promotion_kind: kind of type promotion applied to tensor inputs, means no type promotion convert_input_to_bool: some logical ops require inputs are converted to bool", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\lowering.py", + "ast_data": "FunctionDef name:_register_foreach_lowering arg:aten_fn arg:decomp_fn arguments arg arg FunctionDef name:wrapped arguments arg arg Compare Call Assign Call Call Return return:yes Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "BaseTable", + "source_code": "class BaseTable:\n join_type = None\n parent_alias = None\n filtered_relation = None\n\n def __init__(self, table_name, alias):\n self.table_name = table_name\n self.table_alias = alias\n\n def as_sql(self, compiler, connection):\n alias_str = '' if self.table_alias == self.table_name else ' %s' % self.table_alias\n base_sql = compiler.quote_name_unless_alias(self.table_name)\n return (base_sql + alias_str, [])\n\n def relabeled_clone(self, change_map):\n return self.__class__(self.table_name, change_map.get(self.table_alias, self.table_alias))\n\n @property\n def identity(self):\n return (self.__class__, self.table_name, self.table_alias)\n\n def __eq__(self, other):\n if not isinstance(other, BaseTable):\n return NotImplemented\n return self.identity == other.identity\n\n def __hash__(self):\n return hash(self.identity)", + "docstring": "The BaseTable class is used for base table references in FROM clause. 
For example, the SQL \"foo\" in SELECT * FROM \"foo\" WHERE somecond could be generated by this class.", + "type": "class", + "file_path": "django\\django\\db\\models\\sql\\datastructures.py", + "ast_data": "ClassDef name:BaseTable Assign Assign Assign FunctionDef name:__init__ arg:self arg:table_name arg:alias arguments arg arg arg Assign Assign FunctionDef name:as_sql arg:self arg:compiler arg:connection arguments arg arg arg Assign Compare Assign Call Return return:yes FunctionDef name:relabeled_clone arg:self arg:change_map arguments arg arg Return return:yes Call Call FunctionDef name:identity arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "evaluate", + "source_code": "def evaluate(self, env, engine: str, parser, term_type, eval_in_python):\n if engine == 'python':\n res = self(env)\n else:\n left = self.lhs.evaluate(env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python)\n right = self.rhs.evaluate(env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python)\n if self.op in eval_in_python:\n res = self.func(left.value, right.value)\n else:\n from pandas.core.computation.eval import eval\n res = eval(self, local_dict=env, engine=engine, parser=parser)\n name = env.add_tmp(res)\n return term_type(name, env=env)", + "docstring": "Evaluate a binary operation *before* being passed to the engine. Parameters ---------- env : Scope engine : str parser : str term_type : type eval_in_python : list Returns ------- term_type The \"pre-evaluated\" expression as an instance of ``", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\ops.py", + "ast_data": "FunctionDef name:evaluate arg:self arg:env arg:engine arg:parser arg:term_type arg:eval_in_python arguments arg arg arg arg arg arg If Compare Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "broadcast_to_rank", + "source_code": "def broadcast_to_rank(self, rank):\n if self.rank is None:\n raise ValueError('Unable to broadcast: self.rank is unknown')\n dims_to_add = rank - self.rank\n if dims_to_add < 0:\n raise ValueError('Unable to broadcast: rank=%d must be greater than self.rank=%d.' % (rank, self.rank))\n elif dims_to_add == 0:\n return self\n elif self._partitioned_dim_sizes:\n partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes\n return RaggedTensorDynamicShape(partitioned_dims, self.inner_dim_sizes, self.dim_size_dtype)\n else:\n inner_dims = array_ops.concat([array_ops.ones([dims_to_add], self.dim_size_dtype), self.inner_dim_sizes], axis=0)\n return RaggedTensorDynamicShape([], inner_dims, self.dim_size_dtype)", + "docstring": "Adds leading size-1 dimensions to broadcast to the given rank. E.g., if is , then is . Args: rank: The rank for the returned shape. Returns: A RaggedTensorDynamicShape with dimensions, whose inner dimensions have the same size as and whose outer dimensions have size . 
Raises: ValueError: If is unknown or greater than .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py", + "ast_data": "FunctionDef name:broadcast_to_rank arg:self arg:rank arguments arg arg If Compare Raise Call Assign If Compare Raise Call If Compare Return return:yes If Assign Return return:yes Call Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "to_numpy", + "source_code": "def to_numpy(self, x: Any) -> np.array:\n if isinstance(x, (Tensor,)):\n return x.cpu().detach().numpy()\n if isinstance(x, (np.ndarray,)):\n return x\n if isinstance(x, (Image.Image,)):\n return np.array(x)\n raise TypeError('Input type not supported')", + "docstring": "Convert input to numpy array. Args: x: The input to convert. Returns: np.array: The converted numpy array.", + "type": "method", + "file_path": "kornia\\kornia\\core\\module.py", + "ast_data": "FunctionDef name:to_numpy arg:self arg:x arguments arg arg If Call Return return:yes Call Call Call If Call Return return:yes If Call Return return:yes Call Raise Call" + }, + { + "library": "sphinx", + "name": "find_autosummary_in_files", + "source_code": "def find_autosummary_in_files(filenames: Sequence[str | os.PathLike[str]]) -> list[AutosummaryEntry]:\n documented: list[AutosummaryEntry] = []\n for filename in filenames:\n with open(filename, encoding='utf-8', errors='ignore') as f:\n lines = f.read().splitlines()\n documented.extend(find_autosummary_in_lines(lines, filename=filename))\n return documented", + "docstring": "Find out what items are documented in source/*.rst. See .", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\autosummary\\generate.py", + "ast_data": "FunctionDef name:find_autosummary_in_files arg:filenames arguments arg For With Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "InMemoryUploadedFile", + "source_code": "class InMemoryUploadedFile(UploadedFile):\n\n def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):\n super().__init__(file, name, content_type, size, charset, content_type_extra)\n self.field_name = field_name\n\n def open(self, mode=None):\n self.file.seek(0)\n return self\n\n def chunks(self, chunk_size=None):\n self.file.seek(0)\n yield self.read()\n\n def multiple_chunks(self, chunk_size=None):\n return False", + "docstring": "A file uploaded into memory (i.e. 
stream-to-memory).", + "type": "class", + "file_path": "django\\django\\core\\files\\uploadedfile.py", + "ast_data": "ClassDef name:InMemoryUploadedFile FunctionDef name:__init__ arg:self arg:file arg:field_name arg:name arg:content_type arg:size arg:charset arg:content_type_extra arguments arg arg arg arg arg arg arg arg Call Call Assign FunctionDef name:open arg:self arg:mode arguments arg arg Call Return return:yes FunctionDef name:chunks arg:self arg:chunk_size arguments arg arg Call Call FunctionDef name:multiple_chunks arg:self arg:chunk_size arguments arg arg Return return:yes" + }, + { + "library": "pandas", + "name": "update_info", + "source_code": "def update_info(self, info) -> None:\n for key in self._info_fields:\n value = getattr(self, key, None)\n idx = info.setdefault(self.name, {})\n existing_value = idx.get(key)\n if key in idx and value is not None and (existing_value != value):\n if key in ['freq', 'index_name']:\n ws = attribute_conflict_doc % (key, existing_value, value)\n warnings.warn(ws, AttributeConflictWarning, stacklevel=find_stack_level())\n idx[key] = None\n setattr(self, key, None)\n else:\n raise ValueError(f'invalid info for [{self.name}] for [{key}], existing_value [{existing_value}] conflicts with new value [{value}]')\n elif value is not None or existing_value is not None:\n idx[key] = value", + "docstring": "set/update the info for this indexable with the key/value if there is a conflict raise/warn as needed", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:update_info arg:self arg:info arguments arg arg For Assign Call Assign Call Assign Call If BoolOp Compare Compare Compare If Compare Assign Call Call Assign Call Raise Call If BoolOp Compare Compare Assign" + }, + { + "library": "sphinx", + "name": "findall", + "source_code": "def findall(self, node: Node) -> Iterator[N]:\n for found in node.findall(self):\n yield cast('N', found)", + "docstring": "An alternative to with improved type safety. While the object can be used as an argument to , doing so confounds type checkers' ability to determine the return type of the iterator.", + "type": "method", + "file_path": "sphinx\\sphinx\\util\\nodes.py", + "ast_data": "FunctionDef name:findall arg:self arg:node arguments arg arg For Call Call" + }, + { + "library": "tensorflow", + "name": "task_ordinal_at_coordinates", + "source_code": "def task_ordinal_at_coordinates(self, device_coordinates):\n return self._topology_tasks[tuple(device_coordinates)]", + "docstring": "Returns the TensorFlow task number attached to . Args: device_coordinates: An integer sequence describing a device's physical coordinates in the TPU fabric. 
Returns: Returns the TensorFlow task number that contains the TPU device with those physical coordinates.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py", + "ast_data": "FunctionDef name:task_ordinal_at_coordinates arg:self arg:device_coordinates arguments arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "accuracy", + "source_code": "def accuracy(pred: Tensor, target: Tensor, topk: Tuple[int, ...]=(1,)) -> List[Tensor]:\n maxk = min(max(topk), pred.size()[1])\n batch_size = target.size(0)\n _, pred = pred.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100.0 / batch_size for k in topk]", + "docstring": "Compute the accuracy over the k top predictions for the specified values of k. Args: pred: the input tensor with the logits to evaluate. target: the tensor containing the ground truth. topk: the expected topk ranking. Example: >>> logits = torch.tensor([[0, 1, 0]]) >>> target = torch.tensor([[1]]) >>> accuracy(logits, target) [tensor(100.)]", + "type": "function", + "file_path": "kornia\\kornia\\metrics\\accuracy.py", + "ast_data": "FunctionDef name:accuracy arg:pred arg:target arg:topk arguments arg arg arg Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Call Call Call" + }, + { + "library": "pandas", + "name": "_values_for_rank", + "source_code": "def _values_for_rank(self) -> np.ndarray:\n from pandas import Series\n if self.ordered:\n values = self.codes\n mask = values == -1\n if mask.any():\n values = values.astype('float64')\n values[mask] = np.nan\n elif is_any_real_numeric_dtype(self.categories.dtype):\n values = np.array(self)\n else:\n values = np.array(self.rename_categories(Series(self.categories, copy=False).rank().values))\n return values", + "docstring": "For correctly ranking ordered categorical data. See GH#15420 Ordered categorical data should be ranked on the basis of codes with -1 translated to NaN. Returns ------- numpy.array", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:_values_for_rank arg:self arguments arg If Assign Assign Compare If Call Assign Call Assign If Call Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "maybe_operate_rowwise", + "source_code": "def maybe_operate_rowwise(func: F) -> F:\n\n @functools.wraps(func)\n def newfunc(values: np.ndarray, *, axis: AxisInt | None=None, **kwargs):\n if axis == 1 and values.ndim == 2 and values.flags['C_CONTIGUOUS'] and (values.shape[1] / 1000 > values.shape[0]) and (values.dtype != object) and (values.dtype != bool):\n arrs = list(values)\n if kwargs.get('mask') is not None:\n mask = kwargs.pop('mask')\n results = [func(arrs[i], mask=mask[i], **kwargs) for i in range(len(arrs))]\n else:\n results = [func(x, **kwargs) for x in arrs]\n return np.array(results)\n return func(values, axis=axis, **kwargs)\n return cast(F, newfunc)", + "docstring": "NumPy operations on C-contiguous ndarrays with axis=1 can be very slow if axis 1 >> axis 0. 
Operate row-by-row and concatenate the results.", + "type": "function", + "file_path": "pandas\\pandas\\core\\nanops.py", + "ast_data": "FunctionDef name:maybe_operate_rowwise arg:func arguments arg FunctionDef name:newfunc arg:values arguments arg arg arg If BoolOp Compare Compare Compare Compare Compare Assign Call If Compare Call Assign Call Assign Call Call Call Assign Call Return return:yes Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "__init__", + "source_code": "def __init__(self, dbfunc, domain, fillx=0, filly=0):\n super().__init__(dbfunc)\n self.domain = domain\n self.fillx = fillx\n self.filly = filly\n ufunc_domain[dbfunc] = domain\n ufunc_fills[dbfunc] = (fillx, filly)", + "docstring": "abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dbfunc arg:domain arg:fillx arg:filly arguments arg arg arg arg arg Call Call Assign Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "send_object_list", + "source_code": "@_exception_logger\ndef send_object_list(object_list: list[Any], dst: Optional[int]=None, group: Optional[ProcessGroup]=None, device: Optional[torch.device]=None, group_dst: Optional[int]=None):\n group = _group_or_default_group(group)\n group_dst = _canonicalize_group_rank(group, dst, group_dst)\n _check_not_self_rank(group, group_dst, 'destination')\n if _rank_not_in_group(group):\n _warn_not_in_group('send_object_list')\n return\n current_device = device or _get_object_coll_device(group)\n tensor_list, size_list = zip(*[_object_to_tensor(obj, current_device, group) for obj in object_list])\n object_sizes_tensor = torch.cat(size_list)\n send(object_sizes_tensor, group_dst=group_dst, group=group)\n if len(tensor_list) == 1:\n object_tensor = tensor_list[0]\n else:\n object_tensor = torch.cat(tensor_list)\n send(object_tensor, group_dst=group_dst, group=group)", + "docstring": "Sends picklable objects in `sendobject_collectivessend_object_listsend_object_listsend` instead. Example:: >>> # xdoctest: +SKIP(\"need process group init\") >>> # Note: Process group initialization omitted on each rank. >>> import torch.distributed as dist >>> # Assumes backend is not NCCL >>> device = torch.device(\"cpu\") >>> if dist.get_rank() == 0: >>> # Assumes world_size of 2. >>> objects = [\"foo\", 12, {1: 2}] # any picklable object >>> dist.send_object_list(objects, dst=1, device=device) >>> else: >>> objects = [None, None, None] >>> dist.recv_object_list(objects, src=0, device=device) >>> objects ['foo', 12, {1: 2}]", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:send_object_list arg:object_list arg:dst arg:group arg:device arg:group_dst arguments arg arg arg arg arg Assign Call Assign Call Call If Call Call Return return:no Assign BoolOp Call Assign Call Call Assign Call Call If Compare Call Assign Assign Call Call" + }, + { + "library": "kornia", + "name": "KORNIA_CHECK", + "source_code": "def KORNIA_CHECK(condition: bool, msg: Optional[str]=None, raises: bool=True) -> bool:\n if not condition:\n if raises:\n raise Exception(f'{condition} not true.\\n{msg}')\n return False\n return True", + "docstring": "Check any arbitrary boolean condition. Args: condition: the condition to evaluate. msg: message to show in the exception. 
raises: bool indicating whether an exception should be raised upon failure. Raises: Exception: if the condition is met and raises is True. Example: >>> x = torch.rand(2, 3, 3) >>> KORNIA_CHECK(x.shape[-2:] == (3, 3), \"Invalid homography\") True", + "type": "function", + "file_path": "kornia\\kornia\\core\\check.py", + "ast_data": "FunctionDef name:KORNIA_CHECK arg:condition arg:msg arg:raises arguments arg arg arg If If Raise Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "range", + "source_code": "@staticmethod\ndef range(*args, **kwargs) -> 'DatasetV2':\n from tensorflow.python.data.ops import range_op\n return range_op._range(*args, **kwargs)", + "docstring": "Creates a of a step-separated range of values. >>> ds = Dataset.range(5) >>> [a.item() for a in ds.as_numpy_iterator()] [0, 1, 2, 3, 4] >>> ds = Dataset.range(2, 5) >>> [a.item() for a in ds.as_numpy_iterator()] [2, 3, 4] >>> ds = Dataset.range(1, 5, 2) >>> [a.item() for a in ds.as_numpy_iterator()] [1, 3] >>> ds = Dataset.range(1, 5, -2) >>> [a.item() for a in ds.as_numpy_iterator()] [] >>> ds = Dataset.range(5, 1) >>> [a.item() for a in ds.as_numpy_iterator()] [] >>> ds = Dataset.range(5, 1, -2) >>> [a.item() for a in ds.as_numpy_iterator()] [5, 3] >>> ds = Dataset.range(2, 5, output_type=tf.int32) >>> [a.item() for a in ds.as_numpy_iterator()] [2, 3, 4] >>> ds = Dataset.range(1, 5, 2, output_type=tf.float32) >>> [a.item() for a in ds.as_numpy_iterator()] [1.0, 3.0] Args: *args: follows the same semantics as python's range. len(args) == 1 -> start = 0, stop = args[0], step = 1. len(args) == 2 -> start = args[0], stop = args[1], step = 1. len(args) == 3 -> start = args[0], stop = args[1], step = args[2]. **kwargs: - output_type: Its expected dtype. (Optional, default: ). - name: (Optional.) A name for the tf.data operation. Returns: Dataset: A . Raises: ValueError: if len(args) == 0.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:range arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_power_of_2", + "source_code": "def is_power_of_2(n: int) -> bool:\n return n > 0 and n & n - 1 == 0", + "docstring": "Returns whether n = 2 ** m for some integer m.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\runtime_utils.py", + "ast_data": "FunctionDef name:is_power_of_2 arg:n arguments arg Return return:yes BoolOp Compare Compare" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, diagonals, diagonals_format=_COMPACT, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name='LinearOperatorTridiag'):\n parameters = dict(diagonals=diagonals, diagonals_format=diagonals_format, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n with ops.name_scope(name, values=[diagonals]):\n if diagonals_format not in _DIAGONAL_FORMATS:\n raise ValueError(f'Argument `diagonals_format` must be one of compact, matrix, or sequence. 
Received : {diagonals_format}.')\n if diagonals_format == _SEQUENCE:\n self._diagonals = [linear_operator_util.convert_nonref_to_tensor(d, name='diag_{}'.format(i)) for i, d in enumerate(diagonals)]\n dtype = self._diagonals[0].dtype\n else:\n self._diagonals = linear_operator_util.convert_nonref_to_tensor(diagonals, name='diagonals')\n dtype = self._diagonals.dtype\n self._diagonals_format = diagonals_format\n super(LinearOperatorTridiag, self).__init__(dtype=dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)", + "docstring": "Initialize a . Args: diagonals: or list of s depending on . If , this is a list of three 's each with shape , , representing the superdiagonal, diagonal and subdiagonal in that order. Note the superdiagonal is padded with an element in the last position, and the subdiagonal is padded with an element in the front. If this is a shaped representing the full tridiagonal matrix. If this is a shaped with the second to last dimension indexing the superdiagonal, diagonal and subdiagonal in that order. Note the superdiagonal is padded with an element in the last position, and the subdiagonal is padded with an element in the front. In every case, these s are all floating dtype. diagonals_format: one of , , or . Default is . is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If is real, this is auto-set to . is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive-definite. See: is_square: Expect that this operator acts like square [batch] matrices. name: A name for this . Raises: TypeError: If is not an allowed type. ValueError: If is real, and is not .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_tridiag.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:diagonals arg:diagonals_format arg:is_non_singular arg:is_self_adjoint arg:is_positive_definite arg:is_square arg:name arguments arg arg arg arg arg arg arg arg Assign Call With Call If Compare Raise Call If Compare Assign Call Call Call Assign Assign Call Assign Assign Call Call" + }, + { + "library": "scikit-learn", + "name": "_pairwise_similarity", + "source_code": "def _pairwise_similarity(a, b, similarity):\n a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)\n n_a = a_rows.shape[0]\n n_b = b_rows.shape[0]\n result = np.array([[similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)] for i in range(n_a)])\n return result", + "docstring": "Computes pairwise similarity matrix. 
result[i, j] is the Jaccard coefficient of a's bicluster i and b's bicluster j.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_bicluster.py", + "ast_data": "FunctionDef name:_pairwise_similarity arg:a arg:b arg:similarity arguments arg arg arg Assign Call Assign Assign Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_symmetric_projection", + "source_code": "def _symmetric_projection(self, n):\n q = self._orthogonal_matrix(n)\n mask = math_ops.cast(random_ops.random_normal([n], seed=self.seed) > 0, self.dtype)\n if self.seed:\n self.seed += 1\n c = math_ops.multiply(q, mask)\n return math_ops.matmul(c, array_ops.matrix_transpose(c))", + "docstring": "Compute a n x n symmetric projection matrix. Args: n: Dimension. Returns: A n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:_symmetric_projection arg:self arg:n arguments arg arg Assign Call Assign Call Compare Call If Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "is_uniform", + "source_code": "def is_uniform(self):\n return self._uniform_row_length is not None", + "docstring": "Returns true if the partition is known to be uniform statically. This is based upon the existence of self._uniform_row_length. For example: RowPartition.from_row_lengths([3,3,3]).is_uniform()==false RowPartition.from_uniform_row_length(5, nvals=20).is_uniform()==true RowPartition.from_row_lengths([2,0,2]).is_uniform()==false Returns: Whether a RowPartition is known to be uniform statically.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:is_uniform arg:self arguments arg Return return:yes Compare" + }, + { + "library": "scipy", + "name": "set_link_color_palette", + "source_code": "def set_link_color_palette(palette):\n if palette is None:\n palette = _link_line_colors_default\n elif not isinstance(palette, list | tuple):\n raise TypeError('palette must be a list or tuple')\n _ptypes = [isinstance(p, str) for p in palette]\n if False in _ptypes:\n raise TypeError('all palette list elements must be color strings')\n global _link_line_colors\n _link_line_colors = palette", + "docstring": "Set list of matplotlib color codes for use by dendrogram. Note that this palette is global (i.e., setting it once changes the colors for all subsequent calls to ) and that it affects only the the colors below `dendrogramdendrogram` producing plots with unexpected colors. Examples -------- >>> import numpy as np >>> from scipy.cluster import hierarchy >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., ... 400., 754., 564., 138., 219., 869., 669.]) >>> Z = hierarchy.linkage(ytdist, 'single') >>> dn = hierarchy.dendrogram(Z, no_plot=True) >>> dn['color_list'] ['C1', 'C0', 'C0', 'C0', 'C0'] >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k']) >>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b') >>> dn['color_list'] ['c', 'b', 'b', 'b', 'b'] >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267, ... 
above_threshold_color='k') >>> dn['color_list'] ['c', 'm', 'm', 'k', 'k'] Now, reset the color palette to its default: >>> hierarchy.set_link_color_palette(None)", + "type": "function", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:set_link_color_palette arg:palette arguments arg If Compare Assign If Call Raise Call Assign Call If Compare Raise Call Assign" + }, + { + "library": "pytorch", + "name": "exporter_context", + "source_code": "@deprecated('The feature will be removed. Please remove usage of this function and implement equivalent logic if needed', category=None)\n@contextlib.contextmanager\ndef exporter_context(model, mode: _C_onnx.TrainingMode, verbose: bool):\n with select_model_mode_for_export(model, mode) as mode_ctx, disable_apex_o2_state_dict_hook(model) as apex_ctx, setup_onnx_logging(verbose) as log_ctx:\n yield (mode_ctx, apex_ctx, log_ctx)", + "docstring": "A context manager to temporarily set the training mode of ``, disable the Apex O2 hook, and set the ONNX logging verbosity. .. deprecated:: 2.7 Please set training mode before exporting the model.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\utils.py", + "ast_data": "FunctionDef name:exporter_context arg:model arg:mode arg:verbose arguments arg arg arg With Call Call Call Call" + }, + { + "library": "pytorch", + "name": "_verify_nn_module_stack", + "source_code": "def _verify_nn_module_stack(graph_module: torch.fx.GraphModule) -> None:\n for i, mod in enumerate([graph_module] + list(graph_module.modules())):\n if not isinstance(mod, torch.fx.GraphModule):\n continue\n for node in mod.graph.nodes:\n if node.op in ['call_function', 'get_attr']:\n if i == 0:\n if (nn_module_stack := node.meta.get('nn_module_stack', None)) is None:\n raise SpecViolationError(f'Node {node} of type {node.op} is missing nn_module_stack metadata')\n if not all((isinstance(k, str) and isinstance(v, tuple) and (len(v) == 2) and all((isinstance(x, str) for x in v)) for k, v in nn_module_stack.items())):\n raise SpecViolationError(f'Node {node} of type {node.op} has incorrect nn_module_stack metadata formatexpected Dict[str, Tuple[str, str]], but got {nn_module_stack}')\n elif node.op in ['placeholder', 'output']:\n if node.meta.get('nn_module_stack', None):\n raise SpecViolationError(f'Node {node} of type {node.op} contains nn_module_stack metadata, this should be None')", + "docstring": "Perform nn_module_stack checks on the graph. 
Current constraints: For the top level graph: - populated for 'call_function', 'get_attr' - None for 'placeholder', 'output' For submodule graphs: - None for 'placeholder', output' TODO(pianpwk): make this a consistent node-level check once nn_module_stack is populated for cond submodules.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_verify_nn_module_stack arg:graph_module arguments arg For Call Call Call If Call For If Compare If Compare If Compare Call Raise Call If Call BoolOp Call Call Compare Call Call Call Call Raise Call If Compare If Call Raise Call" + }, + { + "library": "matplotlib", + "name": "_get_w_centers_ranges", + "source_code": "def _get_w_centers_ranges(self):\n minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()\n cx = (maxx + minx) / 2\n cy = (maxy + miny) / 2\n cz = (maxz + minz) / 2\n dx = maxx - minx\n dy = maxy - miny\n dz = maxz - minz\n return (cx, cy, cz, dx, dy, dz)", + "docstring": "Get 3D world centers and axis ranges.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:_get_w_centers_ranges arg:self arguments arg Assign Call Assign Assign Assign Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "equalize", + "source_code": "def equalize(model, paired_modules_list, threshold=0.0001, inplace=True):\n paired_modules_list = process_paired_modules_list_to_name(model, paired_modules_list)\n if not inplace:\n model = copy.deepcopy(model)\n paired_modules_list = expand_groups_in_paired_modules_list(paired_modules_list)\n name_to_module: dict[str, torch.nn.Module] = {}\n previous_name_to_module: dict[str, Any] = {}\n name_set = set(chain.from_iterable(paired_modules_list))\n for name, module in model.named_modules():\n if name in name_set:\n name_to_module[name] = module\n previous_name_to_module[name] = None\n while not converged(name_to_module, previous_name_to_module, threshold):\n for pair in paired_modules_list:\n previous_name_to_module[pair[0]] = copy.deepcopy(name_to_module[pair[0]])\n previous_name_to_module[pair[1]] = copy.deepcopy(name_to_module[pair[1]])\n cross_layer_equalization(name_to_module[pair[0]], name_to_module[pair[1]])\n return model", + "docstring": "Equalize modules until convergence is achieved. Given a list of adjacent modules within a model, equalization will be applied between each pair, this will repeated until convergence is achieved Keeps a copy of the changing modules from the previous iteration, if the copies are not that different than the current modules (determined by converged_test), then the modules have converged enough that further equalizing is not necessary Reference is section 4.1 of this paper Args: model: a model (nn.Module) that equalization is to be applied on paired_modules_list (List(List[nn.module || str])): a list of lists where each sublist is a pair of two submodules found in the model, for each pair the two modules have to be adjacent in the model, with only piece-wise-linear functions like a (P)ReLU or LeakyReLU in between to get expected results. The list can contain either modules, or names of modules in the model. If you pass multiple modules in the same list, they will all be equalized together. 
threshold (float): a number used by the converged function to determine what degree of similarity between models is necessary for them to be called equivalent inplace (bool): determines if function is inplace or not", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py", + "ast_data": "FunctionDef name:equalize arg:model arg:paired_modules_list arg:threshold arg:inplace arguments arg arg arg arg Assign Call If Assign Call Assign Call Assign Call Call For Call If Compare Assign Assign While Call For Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None):\n U, S, _, X, x_is_centered, xp = self._fit(X)\n if U is not None:\n U = U[:, :self.n_components_]\n if self.whiten:\n U *= sqrt(X.shape[0] - 1)\n else:\n U *= S[:self.n_components_]\n return U\n else:\n return self._transform(X, xp, x_is_centered=x_is_centered)", + "docstring": "Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Ignored. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed values. Notes ----- This method returns a Fortran-ordered array. To convert it to a C-ordered array, use 'np.ascontiguousarray'.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_pca.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Assign Call If Compare Assign If Call Return return:yes Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "tricontour", + "source_code": "@_docstring.Substitution(func='tricontour', type='lines')\n@_docstring.interpd\ndef tricontour(ax, *args, **kwargs):\n kwargs['filled'] = False\n return TriContourSet(ax, *args, **kwargs)", + "docstring": "%(_tricontour_doc)s linewidths : float or array-like, default: :rc: The line width of the contour lines. If a number, all levels will be plotted with this linewidth. If a sequence, the levels in ascending order will be plotted with the linewidths in the order specified. If None, this falls back to :rc:. linestyles : {*None*, 'solid', 'dashed', 'dashdot', 'dotted'}, optional If *linestyles* is *None*, the default is 'solid' unless the lines are monochrome. In that case, negative contours will take their linestyle from :rc: setting. *linestyles* can also be an iterable of the above strings specifying a set of linestyles to be used. If this iterable is shorter than the number of contour levels it will be repeated as necessary.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tricontour.py", + "ast_data": "FunctionDef name:tricontour arg:ax arguments arg arg arg Assign Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n check_is_fitted(self)\n return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True)", + "docstring": "Compute the (weighted) graph of Neighbors for points in X. Parameters ---------- X : array-like of shape (n_samples_transform, n_features) Sample data. Returns ------- Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) Xt[i, j] is assigned the weight of edge that connects i to j. Only the neighbors have an explicit value. 
The diagonal is always explicit. The matrix is of CSR format.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_graph.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Return return:yes Call" + }, + { + "library": "cryptography", + "name": "private_bytes_raw", + "source_code": "@abc.abstractmethod\ndef private_bytes_raw(self) -> bytes:\n pass", + "docstring": "The raw bytes of the private key. Equivalent to private_bytes(Raw, Raw, NoEncryption()).", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py", + "ast_data": "FunctionDef name:private_bytes_raw arg:self arguments arg" + }, + { + "library": "numpy", + "name": "pprint", + "source_code": "def pprint(self):\n names = self.dtype.names\n maxlen = max((len(name) for name in names))\n fmt = '%% %ds: %%s' % maxlen\n rows = [fmt % (name, getattr(self, name)) for name in names]\n return '\\n'.join(rows)", + "docstring": "Pretty-print all fields.", + "type": "method", + "file_path": "numpy\\numpy\\_core\\records.py", + "ast_data": "FunctionDef name:pprint arg:self arguments arg Assign Assign Call Call Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_np_doc_form", + "source_code": "def set_np_doc_form(value):\n global _np_doc_form\n _np_doc_form = value", + "docstring": "Selects the form of the original numpy docstrings. This function sets a global variable that controls how a tf-numpy symbol's docstring should refer to the original numpy docstring. If is , the numpy docstring will be verbatim copied into the tf-numpy docstring. Otherwise, a link to the original numpy docstring will be added. Which numpy version the link points to depends on : * : the current stable version; * : the current development version; * pattern : will be treated as a version number, e.g. '1.16'. 
Args: value: the value to set the global variable to.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py", + "ast_data": "FunctionDef name:set_np_doc_form arg:value arguments arg Assign" + }, + { + "library": "django", + "name": "SuccessMessageMixin", + "source_code": "class SuccessMessageMixin:\n success_message = ''\n\n def form_valid(self, form):\n response = super().form_valid(form)\n success_message = self.get_success_message(form.cleaned_data)\n if success_message:\n messages.success(self.request, success_message)\n return response\n\n def get_success_message(self, cleaned_data):\n return self.success_message % cleaned_data", + "docstring": "Add a success message on successful form submission.", + "type": "class", + "file_path": "django\\django\\contrib\\messages\\views.py", + "ast_data": "ClassDef name:SuccessMessageMixin Assign FunctionDef name:form_valid arg:self arg:form arguments arg arg Assign Call Call Assign Call If Call Return return:yes FunctionDef name:get_success_message arg:self arg:cleaned_data arguments arg arg Return return:yes" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, *args, **kwargs):\n if not args:\n super().__init__(self._create_polygon(0, None), **kwargs)\n return\n ext_ring, *init_holes = args\n n_holes = len(init_holes)\n if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):\n if not init_holes[0]:\n init_holes = ()\n n_holes = 0\n elif isinstance(init_holes[0][0], LinearRing):\n init_holes = init_holes[0]\n n_holes = len(init_holes)\n polygon = self._create_polygon(n_holes + 1, [ext_ring, *init_holes])\n super().__init__(polygon, **kwargs)", + "docstring": "Initialize on an exterior ring and a sequence of holes (both instances may be either LinearRing instances, or a tuple/list that may be constructed into a LinearRing). Examples of initialization, where shell, hole1, and hole2 are valid LinearRing geometries: >>> from django.contrib.gis.geos import LinearRing, Polygon >>> shell = hole1 = hole2 = LinearRing() >>> poly = Polygon(shell, hole1, hole2) >>> poly = Polygon(shell, (hole1, hole2)) >>> # Example where a tuple parameters are used: >>> poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)), ... ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg If Call Call Call Return return:no Assign Assign Call If BoolOp Compare Call If Assign Assign If Call Assign Assign Call Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "equal", + "source_code": "@tf_export('math.equal', 'equal')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef equal(x, y, name=None):\n return gen_math_ops.equal(x, y, name=name)", + "docstring": "Returns the truth value of (x == y) element-wise. Performs a [broadcast]( with the arguments and then an element-wise equality comparison, returning a Tensor of boolean values. For example: >>> x = tf.constant([2, 4]) >>> y = tf.constant(2) >>> tf.math.equal(x, y) >>> x = tf.constant([2, 4]) >>> y = tf.constant([2, 4]) >>> tf.math.equal(x, y) Args: x: A . y: A . name: A name for the operation (optional). Returns: A of type bool with the same size as that of x or y. 
Raises: : If shapes of arguments are incompatible", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:equal arg:x arg:y arg:name arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_EmbeddingColumnLayer", + "source_code": "class _EmbeddingColumnLayer(base.Layer):\n\n def __init__(self, embedding_shape, initializer, weight_collections=None, trainable=True, name=None, **kwargs):\n super(_EmbeddingColumnLayer, self).__init__(trainable=trainable, name=name, **kwargs)\n self._embedding_shape = embedding_shape\n self._initializer = initializer\n self._weight_collections = weight_collections\n\n def set_weight_collections(self, weight_collections):\n self._weight_collections = weight_collections\n\n def build(self, _):\n self._embedding_weight_var = self.add_variable(name='embedding_weights', shape=self._embedding_shape, dtype=dtypes.float32, initializer=self._initializer, trainable=self.trainable)\n if self._weight_collections and (not context.executing_eagerly()):\n _add_to_collections(self._embedding_weight_var, self._weight_collections)\n self.built = True\n\n def call(self, _):\n return self._embedding_weight_var", + "docstring": "A layer that stores all the state required for a embedding column.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "ClassDef name:_EmbeddingColumnLayer FunctionDef name:__init__ arg:self arg:embedding_shape arg:initializer arg:weight_collections arg:trainable arg:name arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign FunctionDef name:set_weight_collections arg:self arg:weight_collections arguments arg arg Assign FunctionDef name:build arg:self arg:_ arguments arg arg Assign Call If BoolOp Call Call Assign FunctionDef name:call arg:self arg:_ arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_chief", + "source_code": "@property\ndef is_chief(self):\n return self._is_chief_node", + "docstring": "Returns whether the task is a chief node.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py", + "ast_data": "FunctionDef name:is_chief arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "bmat", + "source_code": "def bmat(blocks, format=None, dtype=None):\n blocks = np.asarray(blocks, dtype='object')\n if any((isinstance(b, sparray) for b in blocks.flat)):\n return _block(blocks, format, dtype)\n else:\n return _block(blocks, format, dtype, return_spmatrix=True)", + "docstring": "Build a sparse array or matrix from sparse sub-blocks Note: is preferred over `block_arrayblocks`. 
See Also -------- block_array Examples -------- >>> from scipy.sparse import coo_array, bmat >>> A = coo_array([[1, 2], [3, 4]]) >>> B = coo_array([[5], [6]]) >>> C = coo_array([[7]]) >>> bmat([[A, B], [None, C]]).toarray() array([[1, 2, 5], [3, 4, 6], [0, 0, 7]]) >>> bmat([[A, None], [None, C]]).toarray() array([[1, 2, 0], [3, 4, 0], [0, 0, 7]])", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_construct.py", + "ast_data": "FunctionDef name:bmat arg:blocks arg:format arg:dtype arguments arg arg arg Assign Call If Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "add_to_set", + "source_code": "def add_to_set(self, event_name: str, key: str, value: Any):\n if event_name not in self.get_stack():\n raise RuntimeError(f\"Event {repr(event_name)} not in {self.get_stack()}. Cannot add metadata to events that aren't in progress. Please make sure the event has started and hasn't ended.\")\n event_data = self.get_event_data()\n if event_name not in event_data:\n event_data[event_name] = {}\n if key not in event_data[event_name]:\n event_data[event_name][key] = set()\n event_data[event_name][key].add(value)", + "docstring": "Add a value to a set within a event_name's metadata if it exists", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:add_to_set arg:self arg:event_name arg:key arg:value arguments arg arg arg arg If Compare Call Raise Call Call Call Assign Call If Compare Assign If Compare Assign Call Call" + }, + { + "library": "pytorch", + "name": "logaddexp", + "source_code": "def logaddexp(input: Union[Tensor, MaskedTensor], other: Union[Tensor, MaskedTensor], *, dtype: Optional[DType]=None, input_mask: Optional[Tensor]=None, other_mask: Optional[Tensor]=None) -> Tensor:\n if dtype is None:\n dtype = input.dtype\n if input.layout == torch.strided and other.layout == torch.strided:\n mask_input = _combine_input_and_mask(logaddexp, input, input_mask)\n mask_other = _combine_input_and_mask(logaddexp, other, other_mask)\n return torch.logaddexp(mask_input, mask_other).to(dtype=dtype)\n else:\n raise ValueError(f'masked logaddexp expects strided tensors (got {input.layout} tensor for input, {other.layout} for other)')", + "docstring": "logaddexp(input, other, *, dtype=None, input_mask=None, other_mask=None) -> Tensor Returns logaddexp of all the elements in the :attr: and the :attr: tensor. The :attr: elements are masked out according to the boolean tensor :attr: and the attr: elements are masked out according to the boolean tensor :attr:. The shapes of a mask tensor and the tensor to be masked don't need to match, but they must be :ref: and the dimensionality of the mask tensor must not be greater than of the tensor to be masked. Args: input (Tensor): the input tensor other (Tensor): the second input tensor Keyword args: dtype (:class:, optional): the desired data type of returned tensor. If specified, the output tensor is casted to :attr: after the operation is performed. Default: None. input_mask (:class:, optional): the boolean tensor containing the binary mask of validity of :attr: tensor elements. Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. other_mask (:class:`torch.Tensor`, optional): the boolean tensor containing the binary mask of validity of :attr:`other` tensor elements. Default: None that is equivalent to ``torch.ones(other.shape, dtype=torch.bool)``. 
Example:: >>> input = torch.tensor([-100.0, -200, -300]) >>> input tensor([-100., -200., -300.]) >>> other = torch.tensor([-1.0, -2, -3]) >>> other tensor([-1., -2., -3.]) >>> mask = torch.tensor([True, False, True]) >>> mask tensor([ True, False, True]) >>> torch.masked._ops.logaddexp(input, other, input_mask=mask, other_mask=mask) tensor([-1., -inf, -3.])", + "type": "function", + "file_path": "pytorch\\torch\\masked\\_ops.py", + "ast_data": "FunctionDef name:logaddexp arg:input arg:other arguments arg arg arg arg arg If Compare Assign If BoolOp Compare Compare Assign Call Assign Call Return return:yes Call Call Raise Call" + }, + { + "library": "matplotlib", + "name": "on_clicked", + "source_code": "def on_clicked(self, func):\n return self._observers.connect('clicked', func)", + "docstring": "Connect the callback function *func* to button click events. Parameters ---------- func : callable When the button is clicked, call *func* with button label. When all buttons are cleared, call *func* with None. The callback func must have the signature:: def func(label: str | None) -> Any Return values may exist, but are ignored. Returns ------- A connection id, which can be used to disconnect the callback.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:on_clicked arg:self arg:func arguments arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "hermecompanion", + "source_code": "def hermecompanion(c):\n [c] = pu.as_series([c])\n if len(c) < 2:\n raise ValueError('Series must have maximum degree of at least 1.')\n if len(c) == 2:\n return np.array([[-c[0] / c[1]]])\n n = len(c) - 1\n mat = np.zeros((n, n), dtype=c.dtype)\n scl = np.hstack((1.0, 1.0 / np.sqrt(np.arange(n - 1, 0, -1))))\n scl = np.multiply.accumulate(scl)[::-1]\n top = mat.reshape(-1)[1::n + 1]\n bot = mat.reshape(-1)[n::n + 1]\n top[...] = np.sqrt(np.arange(1, n))\n bot[...] = top\n mat[:, -1] -= scl * c[:-1] / c[-1]\n return mat", + "docstring": "Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is symmetric when is an HermiteE basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if is used to obtain them. Parameters ---------- c : array_like 1-D array of HermiteE series coefficients ordered from low to high degree. Returns ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg).", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", + "ast_data": "FunctionDef name:hermecompanion arg:c arguments arg Assign Call If Compare Call Raise Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "ONNXRuntimeOptions", + "source_code": "@deprecated('torch.onnx.dynamo_export is deprecated since 2.7.0. 
Please use torch.onnx.export(..., dynamo=True) instead.')\nclass ONNXRuntimeOptions:\n session_options: Sequence[onnxruntime.SessionOptions] | None = None\n 'ONNX Runtime session options.'\n execution_providers: Sequence[str | tuple[str, dict[Any, Any]]] | None = None\n 'ONNX Runtime execution providers to use during model execution.'\n execution_provider_options: Sequence[dict[Any, Any]] | None = None\n 'ONNX Runtime execution provider options.'\n\n def __init__(self, *, session_options: Sequence[onnxruntime.SessionOptions] | None=None, execution_providers: Sequence[str | tuple[str, dict[Any, Any]]] | None=None, execution_provider_options: Sequence[dict[Any, Any]] | None=None):\n self.session_options = session_options\n self.execution_providers = execution_providers\n self.execution_provider_options = execution_provider_options", + "docstring": "Options to influence the execution of the ONNX model through ONNX Runtime. .. deprecated:: 2.7 Please use `` instead. Attributes: session_options: ONNX Runtime session options. execution_providers: ONNX Runtime execution providers to use during model execution. execution_provider_options: ONNX Runtime execution provider options.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py", + "ast_data": "ClassDef name:ONNXRuntimeOptions FunctionDef name:__init__ arg:self arguments arg arg arg arg Assign Assign Assign Call" + }, + { + "library": "scikit-learn", + "name": "_compute_gram", + "source_code": "def _compute_gram(self, X, sqrt_sw):\n center = self.fit_intercept and sparse.issparse(X)\n if not center:\n X_mean = np.zeros(X.shape[1], dtype=X.dtype)\n return (safe_sparse_dot(X, X.T, dense_output=True), X_mean)\n n_samples = X.shape[0]\n sample_weight_matrix = sparse.dia_matrix((sqrt_sw, 0), shape=(n_samples, n_samples))\n X_weighted = sample_weight_matrix.dot(X)\n X_mean, _ = mean_variance_axis(X_weighted, axis=0)\n X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)\n X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True)\n X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)\n return (safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T, X_mean)", + "docstring": "Computes the Gram matrix XX^T with possible centering. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The preprocessed design matrix. sqrt_sw : ndarray of shape (n_samples,) square roots of sample weights Returns ------- gram : ndarray of shape (n_samples, n_samples) The Gram matrix. X_mean : ndarray of shape (n_feature,) The weighted mean of `` for each feature. Notes ----- When X is dense the centering has been done in preprocessing so the mean is 0 and we just compute XX^T. When X is sparse it has not been centered in preprocessing, but it has been scaled by sqrt(sample weights). When self.fit_intercept is False no centering is done. 
The centered X is never actually computed because centering would break the sparsity of X.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py", + "ast_data": "FunctionDef name:_compute_gram arg:self arg:X arg:sqrt_sw arguments arg arg arg Assign BoolOp Call If Assign Call Return return:yes Call Assign Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "RemoveInputMutation", + "source_code": "class RemoveInputMutation(_pass.Transform):\n\n def _run(self, *args) -> torch.fx.GraphModule:\n for node in reversed(self.module.graph.nodes):\n if node.op == 'call_function' and node.target == torch.ops.aten.copy_.default and (len(node.users) == 0) and isinstance(node.args[0], torch.fx.Node) and (node.args[0].op == 'placeholder'):\n self.module.graph.erase_node(node)\n return self.module", + "docstring": "Remove nodes that mutate module inputs. This pass is recommended to be used after `aten.copy_.default` nodes to the graph when it detects mutations to inputs. These nodes are not needed for ONNX export for inference. They could be useful for training.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\functionalization.py", + "ast_data": "ClassDef name:RemoveInputMutation FunctionDef name:_run arg:self arguments arg arg For Call If BoolOp Compare Compare Compare Call Call Compare Call Return return:yes" + }, + { + "library": "pytorch", + "name": "SumLikeReductionTypePromotionRule", + "source_code": "class SumLikeReductionTypePromotionRule(ReductionTypePromotionRule):\n\n def preview_type_promotion(self, args: tuple, kwargs: dict) -> TypePromotionSnapshot:\n assert len(args) >= 1, f'Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument'\n arg = args[0]\n assert isinstance(arg, torch.Tensor), f'type(arg)={type(arg)!r} is not torch.Tensor'\n dtype: torch.dtype | None = kwargs.get('dtype', None)\n if dtype is None:\n if _prims_common.is_boolean_dtype(arg.dtype) or _prims_common.is_integer_dtype(arg.dtype):\n dtype = torch.int64\n else:\n dtype = arg.dtype\n return super().preview_type_promotion(args, {'dtype': dtype})", + "docstring": "Reference type promotion rule from torch.ops.aten.sum. 
This is a special case where computation dtype is always torch.int64 for integral arg, unless overridden by kwarg.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py", + "ast_data": "ClassDef name:SumLikeReductionTypePromotionRule FunctionDef name:preview_type_promotion arg:self arg:args arg:kwargs arguments arg arg arg Compare Call Assign Call Call Call If Compare If BoolOp Call Call Assign Assign Return return:yes Call Call" + }, + { + "library": "django", + "name": "SQLiteCursorWrapper", + "source_code": "class SQLiteCursorWrapper(Database.Cursor):\n\n def execute(self, query, params=None):\n if params is None:\n return super().execute(query)\n param_names = list(params) if isinstance(params, Mapping) else None\n query = self.convert_query(query, param_names=param_names)\n return super().execute(query, params)\n\n def executemany(self, query, param_list):\n peekable, param_list = tee(iter(param_list))\n if (params := next(peekable, None)) and isinstance(params, Mapping):\n param_names = list(params)\n else:\n param_names = None\n query = self.convert_query(query, param_names=param_names)\n return super().executemany(query, param_list)\n\n def convert_query(self, query, *, param_names=None):\n if param_names is None:\n return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')\n else:\n return query % {name: f':{name}' for name in param_names}", + "docstring": "Django uses the \"format\" and \"pyformat\" styles, but Python's sqlite3 module supports neither of these styles. This wrapper performs the following conversions: - \"format\" style to \"qmark\" style - \"pyformat\" style to \"named\" style In both cases, if you want to use a literal \"%s\", you'll need to use \"%%s\".", + "type": "class", + "file_path": "django\\django\\db\\backends\\sqlite3\\base.py", + "ast_data": "ClassDef name:SQLiteCursorWrapper FunctionDef name:execute arg:self arg:query arg:params arguments arg arg arg If Compare Return return:yes Call Call Assign Call Call Assign Call Return return:yes Call Call FunctionDef name:executemany arg:self arg:query arg:param_list arguments arg arg arg Assign Call Call If BoolOp Call Call Assign Call Assign Assign Call Return return:yes Call Call FunctionDef name:convert_query arg:self arg:query arguments arg arg arg If Compare Return return:yes Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_anncoords", + "source_code": "def get_anncoords(self):\n return self._textcoords", + "docstring": "Return the coordinate system to use for . See also *xycoords* in .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:get_anncoords arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_write_tensor_list_section", + "source_code": "def _write_tensor_list_section(self, graph_order):\n self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_TENSOR_LIST))\n self._write_report('%s %d\\n' % (_FIELD_NAME_NUM_TENSORS, len(graph_order.tensors)))\n for i in range(0, len(graph_order.tensors)):\n tensor = graph_order.tensors[i]\n line = '%d \"%s\"' % (i, tensor.name)\n consumers = tensor.consumers()\n consumers.sort(key=lambda op: op.name)\n for consumer_op in consumers:\n if consumer_op.name not in graph_order.op_to_idx:\n raise ValueError('consumer_op is not in op_to_idx. 
got consumer_op={}, op_to_idx={}'.format(consumer_op.name, graph_order.op_to_idx))\n line += ' %d' % graph_order.op_to_idx[consumer_op.name]\n line += '\\n'\n self._write_report(line)\n self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_TENSOR_LIST))", + "docstring": "Writes the tensor-list section of the report.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py", + "ast_data": "FunctionDef name:_write_tensor_list_section arg:self arg:graph_order arguments arg arg Call Call Call For Call Call Assign Assign Assign Call Call arguments arg For If Compare Raise Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "cosine_decay_restarts", + "source_code": "@tf_export(v1=['train.cosine_decay_restarts'])\ndef cosine_decay_restarts(learning_rate, global_step, first_decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0, name=None):\n decayed_lr = learning_rate_schedule.CosineDecayRestarts(learning_rate, first_decay_steps, t_mul=t_mul, m_mul=m_mul, alpha=alpha, name=name)\n if not context.executing_eagerly():\n decayed_lr = decayed_lr(global_step)\n else:\n decayed_lr = functools.partial(decayed_lr, global_step)\n return decayed_lr", + "docstring": "Applies cosine decay with restarts to the learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies a cosine decay function with restarts to a provided initial learning rate. It requires a value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate while taking into account possible warm restarts. The learning rate multiplier first decays from 1 to for steps. Then, a warm restart is performed. Each new warm restart runs for times more steps and with times smaller initial learning rate. Example usage: Args: learning_rate: A scalar or Tensor or a Python number. The initial learning rate. global_step: A scalar or or a Python number. Global step to use for the decay computation. first_decay_steps: A scalar or or a Python number. Number of steps to decay over. t_mul: A scalar or or a Python number. Used to derive the number of iterations in the i-th period m_mul: A scalar or or a Python number. Used to derive the initial learning rate of the i-th period: alpha: A scalar or Tensor or a Python number. Minimum learning rate value as a fraction of the learning_rate. name: String. Optional name of the operation. Defaults to 'SGDRDecay'. Returns: A scalar of the same type as . The decayed learning rate. Raises: ValueError: if is not supplied. References: Stochastic Gradient Descent with Warm Restarts: [Loshchilov et al., 2017] ( ([pdf]( @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. 
@end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\legacy_learning_rate_decay.py", + "ast_data": "FunctionDef name:cosine_decay_restarts arg:learning_rate arg:global_step arg:first_decay_steps arg:t_mul arg:m_mul arg:alpha arg:name arguments arg arg arg arg arg arg arg Assign Call If Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_3d_properties", + "source_code": "def set_3d_properties(self, z=0, zdir='z', axlim_clip=False):\n self._z = z\n self._dir_vec = get_dir_vector(zdir)\n self._axlim_clip = axlim_clip\n self.stale = True", + "docstring": "Set the *z* position and direction of the text. Parameters ---------- z : float The z-position in 3D space. zdir : {'x', 'y', 'z', 3-tuple} The direction of the text. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide text outside the axes view limits. .. versionadded:: 3.10", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:set_3d_properties arg:self arg:z arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Assign Call Assign Assign" + }, + { + "library": "scikit-learn", + "name": "_loss_grad_lbfgs", + "source_code": "def _loss_grad_lbfgs(self, packed_coef_inter, X, y, sample_weight, activations, deltas, coef_grads, intercept_grads):\n self._unpack(packed_coef_inter)\n loss, coef_grads, intercept_grads = self._backprop(X, y, sample_weight, activations, deltas, coef_grads, intercept_grads)\n grad = _pack(coef_grads, intercept_grads)\n return (loss, grad)", + "docstring": "Compute the MLP loss function and its corresponding derivatives with respect to the different parameters given in the initialization. Returned gradients are packed in a single vector so it can be used in lbfgs Parameters ---------- packed_coef_inter : ndarray A vector comprising the flattened coefficients and intercepts. X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : ndarray of shape (n_samples,) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. activations : list, length = n_layers - 1 The ith element of the list holds the values of the ith layer. deltas : list, length = n_layers - 1 The ith element of the list holds the difference between the activations of the i + 1 layer and the backpropagated error. More specifically, deltas are gradients of loss with respect to z in each layer, where z = wx + b is the value of a particular layer before passing through the activation function coef_grads : list, length = n_layers - 1 The ith element contains the amount of change used to update the coefficient parameters of the ith layer in an iteration. intercept_grads : list, length = n_layers - 1 The ith element contains the amount of change used to update the intercept parameters of the ith layer in an iteration. 
Returns ------- loss : float grad : array-like, shape (number of nodes of all layers,)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py", + "ast_data": "FunctionDef name:_loss_grad_lbfgs arg:self arg:packed_coef_inter arg:X arg:y arg:sample_weight arg:activations arg:deltas arg:coef_grads arg:intercept_grads arguments arg arg arg arg arg arg arg arg arg Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_realized", + "source_code": "def is_realized(self):\n return True", + "docstring": "Used by LazyVariableTracker to indicate an unrealized node", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py", + "ast_data": "FunctionDef name:is_realized arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "mminfo", + "source_code": "def mminfo(source):\n return MMFile.info(source)", + "docstring": "Return size and storage parameters from Matrix Market file-like 'source'. Parameters ---------- source : str or file-like Matrix Market filename (extension .mtx) or open file-like object Returns ------- rows : int Number of matrix rows. cols : int Number of matrix columns. entries : int Number of non-zero entries of a sparse matrix or rows*cols for a dense matrix. format : str Either 'coordinate' or 'array'. field : str Either 'real', 'complex', 'pattern', or 'integer'. symmetry : str Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. Examples -------- >>> from io import StringIO >>> from scipy.io import mminfo >>> text = '''%%MatrixMarket matrix coordinate real general ... 5 5 7 ... 2 3 1.0 ... 3 4 2.0 ... 3 5 3.0 ... 4 1 4.0 ... 4 2 5.0 ... 4 3 6.0 ... 4 4 7.0 ... ''' `` returns the number of rows, number of columns, format, field type and symmetry attribute of the source file. >>> mminfo(StringIO(text)) (5, 5, 7, 'coordinate', 'real', 'general')", + "type": "function", + "file_path": "scipy\\scipy\\io\\_mmio.py", + "ast_data": "FunctionDef name:mminfo arg:source arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "strip_local_scope", + "source_code": "def strip_local_scope(s: str) -> str:\n import re\n pattern = 'L\\\\[\\\\s*[\\'\\\\\"](.*?)[\\'\\\\\"]\\\\s*\\\\]'\n return re.sub(pattern, '\\\\1', s)", + "docstring": "Replace occurrences of L[...] with just the inner content. Handles both single and double quotes. This is to generate user friendly recompilation messages.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\guards.py", + "ast_data": "FunctionDef name:strip_local_scope arg:s arguments arg Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "clip_cmd", + "source_code": "def clip_cmd(self, cliprect, clippath):\n cmds = []\n while (self._cliprect, self._clippath) != (cliprect, clippath) and self.parent is not None:\n cmds.extend(self.pop())\n if (self._cliprect, self._clippath) != (cliprect, clippath) or self.parent is None:\n cmds.extend(self.push())\n if self._cliprect != cliprect:\n cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])\n if self._clippath != clippath:\n path, affine = clippath.get_transformed_path_and_affine()\n cmds.extend(PdfFile.pathOperations(path, affine, simplify=False) + [Op.clip, Op.endpath])\n return cmds", + "docstring": "Set clip rectangle. 
Calls and .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:clip_cmd arg:self arg:cliprect arg:clippath arguments arg arg arg Assign While BoolOp Compare Compare Call Call If BoolOp Compare Compare Call Call If Compare Call If Compare Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "generate_filtered_tables", + "source_code": "def generate_filtered_tables(self, feature_filter: str='', module_fqn_filter: str='') -> dict[str, tuple[list, list]]:\n filtered_data: OrderedDict[str, Any] = self._get_filtered_data(feature_filter, module_fqn_filter)\n tensor_features: set[str] = set()\n channel_features: set[str] = set()\n num_channels: int = 0\n for module_fqn in filtered_data:\n for feature_name in filtered_data[module_fqn]:\n feature_data = filtered_data[module_fqn][feature_name]\n is_tensor: bool = isinstance(feature_data, torch.Tensor)\n is_not_zero_dim: bool = is_tensor and len(feature_data.shape) != 0\n if is_not_zero_dim or isinstance(feature_data, list):\n channel_features.add(feature_name)\n num_channels = len(feature_data)\n else:\n tensor_features.add(feature_name)\n tensor_features_list: list[str] = sorted(tensor_features)\n channel_features_list: list[str] = sorted(channel_features)\n tensor_headers, tensor_table = self._generate_tensor_table(filtered_data, tensor_features_list)\n channel_headers, channel_table = self._generate_channels_table(filtered_data, channel_features_list, num_channels)\n table_dict = {self.TABLE_TENSOR_KEY: (tensor_headers, tensor_table), self.TABLE_CHANNEL_KEY: (channel_headers, channel_table)}\n return table_dict", + "docstring": "Takes in optional filter values and generates two tables with desired information. The generated tables are presented in both a list-of-lists format The reason for the two tables are that they handle different things: 1.) the first table handles all tensor level information 2.) the second table handles and displays all channel based information The reasoning for this is that having all the info in one table can make it ambiguous which collected statistics are global, and which are actually per-channel, so it's better to split it up into two tables. This also makes the information much easier to digest given the plethora of statistics collected Tensor table columns: idx layer_fqn feature_1 feature_2 feature_3 .... feature_n ---- --------- --------- --------- --------- --------- Per-Channel table columns: idx layer_fqn channel feature_1 feature_2 feature_3 .... feature_n ---- --------- ------- --------- --------- --------- --------- Args: feature_filter (str, optional): Filters the features presented to only those that contain this filter substring Default = \"\", results in all the features being printed module_fqn_filter (str, optional): Only includes modules that contains this string Default = \"\", results in all the modules in the reports to be visible in the table Returns a dictionary with two keys: (Dict[str, Tuple[List, List]]) A dict containing two keys: \"tensor_level_info\", \"channel_level_info\" Each key maps to a tuple with: A list of the headers of each table A list of lists containing the table information row by row The 0th index row will contain the headers of the columns The rest of the rows will contain data Example Use: >>> # xdoctest: +SKIP(\"undefined variables\") >>> mod_report_visualizer.generate_filtered_tables( ... feature_filter = \"per_channel_min\", ... module_fqn_filter = \"block1\" ... 
) # generates table with per_channel_min info for all modules in block 1 of the model", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_visualizer.py", + "ast_data": "FunctionDef name:generate_filtered_tables arg:self arg:feature_filter arg:module_fqn_filter arguments arg arg arg Call Call Call For For Assign Call BoolOp Compare Call If BoolOp Call Call Assign Call Call Call Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "ColorConverter", + "source_code": "class ColorConverter:\n colors = _colors_full_map\n cache = _colors_full_map.cache\n to_rgb = staticmethod(to_rgb)\n to_rgba = staticmethod(to_rgba)\n to_rgba_array = staticmethod(to_rgba_array)", + "docstring": "A class only kept for backwards compatibility. Its functionality is entirely provided by module-level functions.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "ClassDef name:ColorConverter Assign Assign Assign Call Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_conv_2d_backprop_input_flops", + "source_code": "@ops.RegisterStatistics('Conv2DBackpropInput', 'flops')\ndef _conv_2d_backprop_input_flops(graph, node):\n _verify_conv_data_format(node)\n out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n out_shape.assert_is_fully_defined()\n kernel_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n kernel_shape.assert_is_fully_defined()\n strides_shape = list(node.attr['strides'].list.i)\n strides_product = strides_shape[1] * strides_shape[2]\n return ops.OpStats('flops', 2 * out_shape.num_elements() * kernel_shape.num_elements() / (out_shape.dims[-1].value * strides_product))", + "docstring": "Compute flops for Conv2DBackpropInput operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_conv_2d_backprop_input_flops arg:graph arg:node arguments arg arg Call Assign Call Call Assign Call Call Assign Call Assign Return return:yes Call Call Call Call" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, fp, headers, boundary):\n Entity.__init__(self, fp, headers)\n self.boundary = boundary\n self.file = None\n self.value = None", + "docstring": "Initialize an entity part.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpreqbody.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:fp arg:headers arg:boundary arguments arg arg arg arg Call Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "is_lowered_module", + "source_code": "def is_lowered_module(obj: Any) -> bool:\n return type(obj).__name__ == LOWERED_BACKEND_MODULE_TYPE", + "docstring": "This function is added to avoid using isinstance(obj, LoweredBackendModule) as it will import LoweredBackendModule, which may cause a circular import.", + "type": "function", + "file_path": "pytorch\\torch\\_higher_order_ops\\executorch_call_delegate.py", + "ast_data": "FunctionDef name:is_lowered_module arg:obj arguments arg Return return:yes Compare Call" + }, + { + "library": "scipy", + "name": "_entropy", + "source_code": "def _entropy(self, mu):\n a = 1.0 + np.log(2 * np.pi) + 3 * np.log(mu)\n r = 2 / mu\n b = sc._ufuncs._scaled_exp1(r) / r\n return 0.5 * a - 1.5 * b", + "docstring": "Ref.: (eq. 
9)", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "FunctionDef name:_entropy arg:self arg:mu arguments arg arg Assign Call Call Assign Assign Call Return return:yes" + }, + { + "library": "django", + "name": "DTDForbidden", + "source_code": "class DTDForbidden(DefusedXmlException):\n\n def __init__(self, name, sysid, pubid):\n super().__init__()\n self.name = name\n self.sysid = sysid\n self.pubid = pubid\n\n def __str__(self):\n tpl = \"DTDForbidden(name='{}', system_id={!r}, public_id={!r})\"\n return tpl.format(self.name, self.sysid, self.pubid)", + "docstring": "Document type definition is forbidden.", + "type": "class", + "file_path": "django\\django\\core\\serializers\\xml_serializer.py", + "ast_data": "ClassDef name:DTDForbidden FunctionDef name:__init__ arg:self arg:name arg:sysid arg:pubid arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "forward", + "source_code": "def forward(self, x: torch.Tensor) -> torch.Tensor:\n weight_quant_dequant = self.get_weight()\n result = F.conv2d(x, weight_quant_dequant, self.bias, self.stride, self.padding, self.dilation, self.groups)\n return result", + "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.conv2d --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.conv2d --- *quant - dequant and the backend should be able to fuse the ops with into a quantized conv2d", + "type": "method", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py", + "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "codegen_sync", + "source_code": "def codegen_sync(self) -> None:\n raise NotImplementedError", + "docstring": "Generate synchronization code for the kernel. This method depends on the hardware characteristics.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:codegen_sync arg:self arguments arg Raise" + }, + { + "library": "pytorch", + "name": "finish_plan", + "source_code": "@abc.abstractmethod\ndef finish_plan(self, central_plan: LoadPlan) -> LoadPlan:\n pass", + "docstring": "Accept the plan from coordinator and return final LoadPlan.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py", + "ast_data": "FunctionDef name:finish_plan arg:self arg:central_plan arguments arg arg" + }, + { + "library": "pytorch", + "name": "_formula_transposed", + "source_code": "def _formula_transposed(ln: int, p: int, d: int, k: int, s: int, op: int) -> int:\n return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1", + "docstring": "Formula to apply to calculate the length of some dimension of the output if transposed convolution is used. 
See: Args: ln: length of the dimension p: padding in that dim d: dilation in that dim k: kernel size in that dim s: stride in that dim op: output padding in that dim Returns: The output length", + "type": "function", + "file_path": "pytorch\\torch\\_meta_registrations.py", + "ast_data": "FunctionDef name:_formula_transposed arg:ln arg:p arg:d arg:k arg:s arg:op arguments arg arg arg arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "FakeOptimizerForRestoration", + "source_code": "class FakeOptimizerForRestoration(trackable.Trackable):\n\n def __init__(self, optimizer):\n self._optimizer = optimizer\n\n def get_slot_names(self):\n return self._optimizer.get_slot_names()\n\n def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable):\n return self._optimizer._create_or_restore_slot_variable(slot_variable_position, slot_name, variable)", + "docstring": "A fake optimizer used to support restoring TensorFlow 2.2 checkpoints. The checkpoint format for LossScaleOptimizers changed after TF 2.2. This class exists to support restoring TF 2.2 checkpoints in newer version of TensorFlow. In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling the following in LossScaleOptimizer.__init__ This means a dependency from the LossScaleOptimizer to the wrapped optimizer would be stored in the checkpoint. However now, the checkpoint format with a LossScaleOptimizer is the same as the format without a LossScaleOptimizer, except the loss scale is also stored. This means there is no dependency from the LossScaleOptimizer to the wrapped optimizer. Instead, the LossScaleOptimizer acts as if it is the wrapped optimizer, from a checkpoint's perspective, by overriding all Trackable methods and delegating them to the wrapped optimizer. To allow restoring TF 2.2. checkpoints, LossScaleOptimizer adds a dependency on this class instead of the inner optimizer. When restored, this class will instead restore the slot variables of the inner optimizer. Since this class has no variables, it does not affect the checkpoint when saved.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "ClassDef name:FakeOptimizerForRestoration FunctionDef name:__init__ arg:self arg:optimizer arguments arg arg Assign FunctionDef name:get_slot_names arg:self arguments arg Return return:yes Call FunctionDef name:_create_or_restore_slot_variable arg:self arg:slot_variable_position arg:slot_name arg:variable arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "flattened_inputs_and_outputs", + "source_code": "def flattened_inputs_and_outputs(self):\n\n def _flatten(input_or_output_dict):\n flattened_items = []\n for item in input_or_output_dict.values():\n flattened_items.extend(item.flatten())\n return flattened_items\n return (_flatten(self.inputs), _flatten(self.outputs))", + "docstring": "Return a list of inputs and outputs in a flattened format. Returns: Tuple of (inputs, outputs). 
where input and output i a list of names.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py", + "ast_data": "FunctionDef name:flattened_inputs_and_outputs arg:self arguments arg FunctionDef name:_flatten arg:input_or_output_dict arguments arg Assign For Call Call Call Return return:yes Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_prepare_fit_binary", + "source_code": "def _prepare_fit_binary(est, y, i, input_dtype, label_encode=True):\n y_i = np.ones(y.shape, dtype=input_dtype, order='C')\n if label_encode:\n y_i[y != est.classes_[i]] = 0.0\n else:\n y_i[y != est.classes_[i]] = -1.0\n average_intercept = 0\n average_coef = None\n if len(est.classes_) == 2:\n if not est.average:\n coef = est.coef_.ravel()\n intercept = est.intercept_[0]\n else:\n coef = est._standard_coef.ravel()\n intercept = est._standard_intercept[0]\n average_coef = est._average_coef.ravel()\n average_intercept = est._average_intercept[0]\n elif not est.average:\n coef = est.coef_[i]\n intercept = est.intercept_[i]\n else:\n coef = est._standard_coef[i]\n intercept = est._standard_intercept[i]\n average_coef = est._average_coef[i]\n average_intercept = est._average_intercept[i]\n return (y_i, coef, intercept, average_coef, average_intercept)", + "docstring": "Initialization for fit_binary. Returns y, coef, intercept, average_coef, average_intercept.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py", + "ast_data": "FunctionDef name:_prepare_fit_binary arg:est arg:y arg:i arg:input_dtype arg:label_encode arguments arg arg arg arg arg Assign Call If Assign Compare Assign Compare Assign Assign If Compare Call If Assign Call Assign Assign Call Assign Assign Call Assign If Assign Assign Assign Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "print_readable", + "source_code": "def print_readable(self):\n self.split_gm.print_readable()", + "docstring": "Print the pipe in a human-readable format. This will print both the root pipe and each stage module.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py", + "ast_data": "FunctionDef name:print_readable arg:self arguments arg Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n return self._decision_function(X)", + "docstring": "Predict using the linear model. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Samples. 
Returns ------- C : array, shape (n_samples,) Returns predicted values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "transform_tensor", + "source_code": "def transform_tensor(self, read_item: ReadItem, tensor: torch.Tensor):\n return narrow_tensor_by_index(tensor, read_item.dest_offsets, read_item.lengths)", + "docstring": "Extension from the planner interface to make it easy to extend the default planner.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py", + "ast_data": "FunctionDef name:transform_tensor arg:self arg:read_item arg:tensor arguments arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "full_clean", + "source_code": "def full_clean(self):\n self._errors = ErrorDict(renderer=self.renderer)\n if not self.is_bound:\n return\n self.cleaned_data = {}\n if self.empty_permitted and (not self.has_changed()):\n return\n self._clean_fields()\n self._clean_form()\n self._post_clean()", + "docstring": "Clean all of self.data and populate self._errors and self.cleaned_data.", + "type": "method", + "file_path": "django\\django\\forms\\forms.py", + "ast_data": "FunctionDef name:full_clean arg:self arguments arg Assign Call If Return return:no Assign If BoolOp Call Return return:no Call Call Call" + }, + { + "library": "matplotlib", + "name": "_get_interpolating_points", + "source_code": "@classmethod\ndef _get_interpolating_points(cls, t, f1, f2, idx):\n im1 = max(idx - 1, 0)\n t_values = t[im1:idx + 1]\n diff_values = f1[im1:idx + 1] - f2[im1:idx + 1]\n f1_values = f1[im1:idx + 1]\n if len(diff_values) == 2:\n if np.ma.is_masked(diff_values[1]):\n return (t[im1], f1[im1])\n elif np.ma.is_masked(diff_values[0]):\n return (t[idx], f1[idx])\n diff_root_t = cls._get_diff_root(0, diff_values, t_values)\n diff_root_f = cls._get_diff_root(diff_root_t, t_values, f1_values)\n return (diff_root_t, diff_root_f)", + "docstring": "Calculate interpolating points.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:_get_interpolating_points arg:cls arg:t arg:f1 arg:f2 arg:idx arguments arg arg arg arg arg Assign Call Assign Assign Assign If Compare Call If Call Return return:yes If Call Return return:yes Assign Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, images: Tensor) -> tuple[Tensor, Tensor]:\n feats = self.backbone(images)\n feats_buf = self.encoder(feats)\n logits, boxes = self.decoder(feats_buf)\n return (logits, boxes)", + "docstring": "Detect objects in an image. Args: images: images to be detected. Shape :math:. Returns: - **logits** - Tensor of shape :math:, where :math: is the number of queries, :math: is the number of classes. - **boxes** - Tensor of shape :math:, where :math: is the number of queries.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\model.py", + "ast_data": "FunctionDef name:forward arg:self arg:images arguments arg arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "IntCastingNaNError", + "source_code": "class IntCastingNaNError(ValueError):\n pass", + "docstring": "Exception raised when converting (``) an array with NaN to an integer type. 
This error occurs when attempting to cast a data structure containing non-finite values (such as NaN or infinity) to an integer data type. Integer types do not support non-finite values, so such conversions are explicitly disallowed to prevent silent data corruption or unexpected behavior. See Also -------- DataFrame.astype : Method to cast a pandas DataFrame object to a specified dtype. Series.astype : Method to cast a pandas Series object to a specified dtype. Examples -------- >>> pd.DataFrame(np.array([[1, np.nan], [2, 3]]), dtype=\"i8\") Traceback (most recent call last): IntCastingNaNError: Cannot convert non-finite values (NA or inf) to integer", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:IntCastingNaNError" + }, + { + "library": "tensorflow", + "name": "_is_all_finite", + "source_code": "def _is_all_finite(grads):\n\n def raw_values(g):\n return g.values if isinstance(g, indexed_slices.IndexedSlices) else g\n is_finite_per_grad = [math_ops.reduce_all(math_ops.is_finite(raw_values(g))) for g in grads if g is not None]\n return math_ops.reduce_all(is_finite_per_grad)", + "docstring": "Returns a scalar boolean tensor indicating if all gradients are finite.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py", + "ast_data": "FunctionDef name:_is_all_finite arg:grads arguments arg FunctionDef name:raw_values arg:g arguments arg Return return:yes Call Assign Call Call Call Compare Return return:yes Call" + }, + { + "library": "pygame", + "name": "wait", + "source_code": "def wait():\n _ft_init_check()\n return pygame.event.wait()", + "docstring": "wait() -> Event wait for an event", + "type": "function", + "file_path": "pygame\\src_py\\fastevent.py", + "ast_data": "FunctionDef name:wait arguments Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "ntob", + "source_code": "def ntob(n, encoding='ISO-8859-1'):\n assert_native(n)\n return n.encode(encoding)", + "docstring": "Convert a native :class: to a :class: instance. The encoding can be changed to non-ASCII optionally.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\_cpcompat.py", + "ast_data": "FunctionDef name:ntob arg:n arg:encoding arguments arg arg Call Return return:yes Call" + }, + { + "library": "django", + "name": "wkt", + "source_code": "@property\ndef wkt(self):\n return wkt_w(dim=3 if self.hasz else 2, trim=True).write(self).decode()", + "docstring": "Return the WKT (Well-Known Text) representation of this Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:wkt arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "dispatch_strategy", + "source_code": "def dispatch_strategy(fn: NativeFunctionWithDifferentiabilityInfo) -> str:\n if fn.func.is_abstract or (fn.info is not None and any((info.has_derivatives for info in fn.info.values()))):\n return 'use_derived'\n else:\n return 'use_type'", + "docstring": "How are we going to call the underlying implementation of a declaration? There are two strategies: - use_derived: we want to call the implementation on CPUDoubleType (or a similar, derived Type instance). Because these derived instances deal in Tensors, not Variables (it's a completely different object, so it doesn't dispatch back to VariableType), code on this dispatch path needs to wrap/unwrap tensors. 
If the derived implementation takes and returns tensors, the implementation is usually differentiable (although we also use the derived dispatch path for non-differentiable functions that we still want to dispatch on the derived Type instance; e.g., size()) - use_type: we want to call the implementation on Type, because it is implemented concretely, and the functions it invokes will get dispatched back to VariableType (which will ensure that they are differentiable.)", + "type": "function", + "file_path": "pytorch\\torchgen\\api\\autograd.py", + "ast_data": "FunctionDef name:dispatch_strategy arg:fn arguments arg If BoolOp BoolOp Compare Call Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_valid_dtypes", + "source_code": "def _valid_dtypes(self):\n return set([dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])", + "docstring": "Valid types for loss, variables and gradients. Subclasses should override to allow other float types. Returns: Valid types for loss, variables and gradients.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_valid_dtypes arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_execute_dependency_graph", + "source_code": "def _execute_dependency_graph(self):\n self._validate_dependency_graph()\n extern_modules = []\n for module_name, attrs in self.dependency_graph.nodes.items():\n action = attrs['action']\n if action == _ModuleProviderAction.EXTERN:\n for hook in self._extern_hooks.values():\n hook(self, module_name)\n extern_modules.append(module_name)\n elif action == _ModuleProviderAction.MOCK:\n for hook in self._mock_hooks.values():\n hook(self, module_name)\n self._write_mock_file()\n is_package = hasattr(self._import_module(module_name), '__path__')\n self._write_source_string(module_name, _MOCK_IMPL, is_package)\n elif action == _ModuleProviderAction.INTERN:\n for hook in self._intern_hooks.values():\n hook(self, module_name)\n if 'provided' not in attrs:\n raise AssertionError(f'Module was marked `intern` but not provided: {module_name}')\n if attrs.get('is_pickle') is True:\n continue\n is_package = attrs['is_package']\n source = attrs['source']\n self._write_source_string(module_name, source, is_package)\n elif action == _ModuleProviderAction.REPACKAGED_MOCK_MODULE:\n self._write_mock_file()\n elif action == _ModuleProviderAction.SKIP:\n continue\n else:\n raise AssertionError(f'Invalid action: {module_name}, {action}. 
Please report a bug to PyTorch.')\n extern_file_contents = '\\n'.join(extern_modules) + '\\n'\n self._write('.data/extern_modules', extern_file_contents)", + "docstring": "Takes a finalized dependency graph describing how to package all modules and executes it, writing to the ZIP archive.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_exporter.py", + "ast_data": "FunctionDef name:_execute_dependency_graph arg:self arguments arg Call Assign For Call Assign If Compare For Call Call Call If Compare For Call Call Call Assign Call Call Call If Compare For Call Call If Compare Raise Call If Compare Call Assign Assign Call If Compare Call If Compare Raise Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_get_dtype_from_nested_lists", + "source_code": "def _get_dtype_from_nested_lists(list_or_tuple):\n for elem in list_or_tuple:\n if isinstance(elem, core.Tensor):\n return elem.dtype.base_dtype\n elif isinstance(elem, (list, tuple)):\n maybe_dtype = _get_dtype_from_nested_lists(elem)\n if maybe_dtype is not None:\n return maybe_dtype\n return None", + "docstring": "Returns the dtype of any tensor-like object in , if found. Args: list_or_tuple: A list or tuple representing an object that can be converted to a . Returns: The dtype of any tensor-like object in , or if no such object exists.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:_get_dtype_from_nested_lists arg:list_or_tuple arguments arg For If Call Return return:yes If Call Assign Call If Compare Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "logits", + "source_code": "@property\ndef logits(self):\n return self._logits", + "docstring": "Vector of coordinatewise logits.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\multinomial.py", + "ast_data": "FunctionDef name:logits arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "xpdf_distill", + "source_code": "def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):\n mpl._get_executable_info('gs')\n mpl._get_executable_info('pdftops')\n if eps:\n paper_option = ['-dEPSCrop']\n elif ptype == 'figure':\n paper_option = [f'-dDEVICEWIDTHPOINTS#{bbox[2]}', f'-dDEVICEHEIGHTPOINTS#{bbox[3]}']\n else:\n paper_option = [f'-sPAPERSIZE#{ptype}']\n with TemporaryDirectory() as tmpdir:\n tmppdf = pathlib.Path(tmpdir, 'tmp.pdf')\n tmpps = pathlib.Path(tmpdir, 'tmp.ps')\n cbook._check_and_log_subprocess(['ps2pdf', '-dAutoFilterColorImages#false', '-dAutoFilterGrayImages#false', '-sAutoRotatePages#None', '-sGrayImageFilter#FlateEncode', '-sColorImageFilter#FlateEncode', *paper_option, tmpfile, tmppdf], _log)\n cbook._check_and_log_subprocess(['pdftops', '-paper', 'match', '-level3', tmppdf, tmpps], _log)\n shutil.move(tmpps, tmpfile)\n if eps:\n pstoeps(tmpfile)", + "docstring": "Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file. This yields smaller files without illegal encapsulated postscript operators. 
This distiller is preferred, generating high-level postscript output that treats text as text.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py", + "ast_data": "FunctionDef name:xpdf_distill arg:tmpfile arg:eps arg:ptype arg:bbox arg:rotated arguments arg arg arg arg arg Call Call If Assign If Compare Assign Assign With Call Assign Call Assign Call Call Call Call If Call" + }, + { + "library": "tensorflow", + "name": "cache_variable_reads", + "source_code": "@contextlib.contextmanager\ndef cache_variable_reads():\n try:\n if caching_scope_local.in_caching_scope():\n raise ValueError('cache_variable_reads scope cannot be nested')\n caching_scope_local.enter_scope()\n yield\n finally:\n caching_scope_local.exit_scope()", + "docstring": "Scope for caching variable reads for AggregatingVariable. The variable reads for AggregatingVariable inside this scope are cached. i.e. the first read of variable reads the value from possibly remote handle, but subsequent reads are returned using local cached value. For example: strategy = ParameterServerStrategy... with strategy.scope(): # Variable v is of AggregatingVariable type with actual variable residing # on PS. v = tf.Variable(1.0) with distribute_utils.cache_variable_reads(): v.read_value() # Reads value 1.0 v.assign(constant_op.constant(5.0)) # v changes to 5.0 t1 = v.read_value() t2 = v.read_value() # Both t1 & t2 return cached value 1.0 from local CPU. Notes about cache_variable_reads scope: 1. Nesting of scope cache_variable_reads() is not supported 2. And when caching scope is enabled, the thread enabling the cache and mirrored_run._MirroredReplicaThread threads spawned from it will have caching enabled. Yields: A context for caching variables.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py", + "ast_data": "FunctionDef name:cache_variable_reads arguments Try If Call Raise Call Call Call" + }, + { + "library": "tensorflow", + "name": "res_call", + "source_code": "def res_call(self, ns, types_ns, node, f_type, args, keywords):\n raise NotImplementedError('subclasses must implement')", + "docstring": "Resolves the return type an external function or method call. Args: ns: namespace types_ns: types namespace node: str, the function name f_type: types of the actual function being called, if known args: types of each respective argument in node.args keywords: types of each respective argument in node.keywords Returns: Tuple (return_type, side_effect_types). The first element is just the return types of the function. The second element is a map from argument names to sets of types, and allow modelling side effects of functions (for example via global or nonlocal).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py", + "ast_data": "FunctionDef name:res_call arg:self arg:ns arg:types_ns arg:node arg:f_type arg:args arg:keywords arguments arg arg arg arg arg arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "EMPTY_NN_MODULE_HOOKS_DICT", + "source_code": "def EMPTY_NN_MODULE_HOOKS_DICT(self, guard):\n if config.skip_nnmodule_hook_guards:\n return\n self.SEQUENCE_LENGTH(guard)", + "docstring": "Special guard to skip guards on empty hooks. 
This is controlled by skip_nnmodule_hook_guards", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\guards.py", + "ast_data": "FunctionDef name:EMPTY_NN_MODULE_HOOKS_DICT arg:self arg:guard arguments arg arg If Return return:no Call" + }, + { + "library": "tensorflow", + "name": "_get_all_paths", + "source_code": "def _get_all_paths(st):\n fields = st.field_names()\n all_paths = {()}\n for k in fields:\n v = st.field_value(k)\n if isinstance(v, StructuredTensor):\n all_paths = all_paths.union([(k,) + p for p in _get_all_paths(v)])\n else:\n all_paths.add((k,))\n return all_paths", + "docstring": "Get all the paths from a StructuredTensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:_get_all_paths arg:st arguments arg Assign Call Assign For Assign Call If Call Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_topmost_subplotspec", + "source_code": "def get_topmost_subplotspec(self):\n gridspec = self.get_gridspec()\n if hasattr(gridspec, 'get_topmost_subplotspec'):\n return gridspec.get_topmost_subplotspec()\n else:\n return self", + "docstring": "Return the topmost instance associated with the subplot.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", + "ast_data": "FunctionDef name:get_topmost_subplotspec arg:self arguments arg Assign Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "parse_message", + "source_code": "def parse_message(message):\n error_message = []\n func_tags = []\n node_tags = []\n pos = 0\n for match in re.finditer(_INTERPOLATION_PATTERN, message):\n parsed_tag = _ParseTag(match.group('type'), match.group('name'))\n if parsed_tag.type == 'function_node':\n error_message.append(match.group('sep'))\n func_tags.append(parsed_tag)\n else:\n error_message.append(match.group())\n node_tags.append(parsed_tag)\n pos = match.end()\n error_message.append(message[pos:])\n return (''.join(error_message), func_tags, node_tags)", + "docstring": "Extract function tags and node tags from a message. Tags are named tuples representing the string {{type name}}. For example, in \"123{{node Foo}}456{{function_node Bar}}789\", there are two tags: a node tag and a function tag. Args: message: An error message, possibly from an OpError. Returns: A tuple containing the original message with function nodes stripped, function tags, and node tags. 
For example, if message is \"123{{node Foo}}456{{function_node Bar}}789\" then this function returns (\"123{{node Foo}}456789\", [_ParseTag(\"function_node\", \"Bar\")], [_ParseTag(\"node\", \"Foo\")]).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py", + "ast_data": "FunctionDef name:parse_message arg:message arguments arg Assign Assign Assign Assign For Call Assign Call Call Call If Compare Call Call Call Call Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "collect_producer_nodes", + "source_code": "def collect_producer_nodes(node: Node) -> Optional[list[Node]]:\n nodes = [node]\n frontier = [node]\n while frontier:\n node = frontier.pop()\n all_args = list(node.args) + list(node.kwargs.values())\n for arg in all_args:\n if not isinstance(arg, Node):\n continue\n if arg.op == 'placeholder':\n return None\n nodes.append(arg)\n if not (arg.op == 'call_function' and arg.target == getattr):\n frontier.append(arg)\n return nodes", + "docstring": "Starting from a target node, trace back until we hit inpu or getattr node. This is used to extract the chain of operators starting from getattr to the target node, for example def forward(self, x): observed = self.observer(self.weight) return F.linear(x, observed) collect_producer_nodes(observed) will either return a list of nodes that produces the observed node or None if we can't extract a self contained graph without free variables(inputs of the forward function).", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py", + "ast_data": "FunctionDef name:collect_producer_nodes arg:node arguments arg Assign Assign While Assign Call Assign Call Call Call For If Call If Compare Return return:no Call If BoolOp Compare Compare Call Return return:yes" + }, + { + "library": "authlib", + "name": "validate_token_request", + "source_code": "def validate_token_request(self):\n device_code = self.request.payload.data.get('device_code')\n if not device_code:\n raise InvalidRequestError(\"Missing 'device_code' in payload\")\n client = self.authenticate_token_endpoint_client()\n if not client.check_grant_type(self.GRANT_TYPE):\n raise UnauthorizedClientError(f\"The client is not authorized to use 'response_type={self.GRANT_TYPE}'\")\n credential = self.query_device_credential(device_code)\n if not credential:\n raise InvalidRequestError(\"Invalid 'device_code' in payload\")\n if credential.get_client_id() != client.get_client_id():\n raise UnauthorizedClientError()\n user = self.validate_device_credential(credential)\n self.request.user = user\n self.request.client = client\n self.request.credential = credential", + "docstring": "After displaying instructions to the user, the client creates an access token request and sends it to the token endpoint with the following parameters: grant_type REQUIRED. Value MUST be set to \"urn:ietf:params:oauth:grant-type:device_code\". device_code REQUIRED. The device verification code, \"device_code\" from the device authorization response. client_id REQUIRED if the client is not authenticating with the authorization server as described in Section 3.2.1. of [RFC6749]. The client identifier as described in Section 2.2 of [RFC6749]. 
For example, the client makes the following HTTPS request:: POST /token HTTP/1.1 Host: server.example.com Content-Type: application/x-www-form-urlencoded grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Adevice_code &device_code=GmRhmhcxhwAzkoEqiMEg_DnyEysNkuNhszIySk9eS &client_id=1406020730", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8628\\device_code.py", + "ast_data": "FunctionDef name:validate_token_request arg:self arguments arg Assign Call If Raise Call Assign Call If Call Raise Call Assign Call If Raise Call If Compare Call Call Raise Call Assign Call Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "__same_types", + "source_code": "@staticmethod\ndef __same_types(a, b):\n if nest.is_namedtuple(a) and nest.is_namedtuple(b):\n return nest.same_namedtuples(a, b)\n else:\n return type(a) is type(b)", + "docstring": "Returns whether a and b have the same type, up to namedtuple equivalence. Consistent with tf.nest.assert_same_structure(), two namedtuple types are considered the same iff they agree in their class name (without qualification by module name) and in their sequence of field names. This makes namedtuples recreated by nested_structure_coder compatible with their original Python definition. Args: a: a Python object. b: a Python object. Returns: A boolean that is true iff type(a) and type(b) are the same object or equivalent namedtuple types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:__same_types arg:a arg:b arguments arg arg If BoolOp Call Call Return return:yes Call Return return:yes Compare Call Call" + }, + { + "library": "tensorflow", + "name": "task_type", + "source_code": "@task_type.setter\ndef task_type(self, task_type):\n self._task_type = task_type", + "docstring": "Setter of property. See property doc.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py", + "ast_data": "FunctionDef name:task_type arg:self arg:task_type arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "ReferenceVariableSaveable", + "source_code": "class ReferenceVariableSaveable(saveable_object.SaveableObject):\n\n def __init__(self, var, slice_spec, name):\n spec = saveable_object.SaveSpec(var, slice_spec, name, dtype=var.dtype)\n super(ReferenceVariableSaveable, self).__init__(var, [spec], name)\n\n def restore(self, restored_tensors, restored_shapes):\n restored_tensor = restored_tensors[0]\n if restored_shapes is not None:\n restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])\n return state_ops.assign(self.op, restored_tensor, validate_shape=restored_shapes is None and self.op.get_shape().is_fully_defined())", + "docstring": "SaveableObject implementation that handles reference variables.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "ClassDef name:ReferenceVariableSaveable FunctionDef name:__init__ arg:self arg:var arg:slice_spec arg:name arguments arg arg arg arg Assign Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign If Compare Assign Call Return return:yes Call BoolOp Compare Call Call" + }, + { + "library": "kornia", + "name": "fx", + "source_code": "@property\ndef fx(self) -> Tensor:\n return self.intrinsics[..., 0, 0]", + "docstring": "Return the focal length in the x-direction. 
Returns: tensor of shape :math:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:fx arg:self arguments arg Return return:yes" + }, + { + "library": "cryptography", + "name": "sign", + "source_code": "@abc.abstractmethod\ndef sign(self, data: Buffer) -> bytes:\n pass", + "docstring": "Signs the data.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py", + "ast_data": "FunctionDef name:sign arg:self arg:data arguments arg arg" + }, + { + "library": "tensorflow", + "name": "max_error", + "source_code": "def max_error(grad1, grad2):\n error = 0\n for j_t, j_n in zip(grad1, grad2):\n if j_t.size or j_n.size:\n error = np.maximum(error, np.fabs(j_t - j_n).max())\n return error", + "docstring": "Computes maximum elementwise gap. Computes the maximum elementwise gap between two lists of tensors of the same shape. Args: grad1: a lists of tensors. grad2: a lists of tensors with the same shape as grad1. Returns: The maximum elementwise gap between the two.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker_v2.py", + "ast_data": "FunctionDef name:max_error arg:grad1 arg:grad2 arguments arg arg Assign For Call If BoolOp Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "bool", + "source_code": "def bool(self):\n _warn_typed_storage_removal()\n return self._to(torch.bool)", + "docstring": "Casts this storage to bool type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:bool arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "all_node_args_except_first", + "source_code": "def all_node_args_except_first(node: Node) -> list[int]:\n return list(range(1, len(node.args)))", + "docstring": "Returns all node arg indices after first", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py", + "ast_data": "FunctionDef name:all_node_args_except_first arg:node arguments arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "ObjectGraphProtoPrettyPrinter", + "source_code": "class ObjectGraphProtoPrettyPrinter:\n __slots__ = ['_object_graph_proto', '_node_name_cache']\n\n def __init__(self, object_graph_proto):\n self._object_graph_proto = object_graph_proto\n self._node_name_cache = None\n\n @property\n def node_names(self):\n if self._node_name_cache is not None:\n return self._node_name_cache\n path_to_root = {}\n path_to_root[0] = ('(root)',)\n to_visit = collections.deque([0])\n while to_visit:\n node_id = to_visit.popleft()\n obj = self._object_graph_proto.nodes[node_id]\n for child in obj.children:\n if child.node_id not in path_to_root:\n path_to_root[child.node_id] = path_to_root[node_id] + (child.local_name,)\n to_visit.append(child.node_id)\n node_names = {}\n for node_id, path_to_root in path_to_root.items():\n node_names[node_id] = '.'.join(path_to_root)\n for node_id, node in enumerate(self._object_graph_proto.nodes):\n for slot_reference in node.slot_variables:\n node_names[slot_reference.slot_variable_node_id] = f\"{node_names[node_id]}'s state '{slot_reference.slot_name}' for {node_names[slot_reference.original_variable_node_id]}\"\n self._node_name_cache = node_names\n return node_names", + "docstring": "Lazily traverses an object graph proto to pretty print names. If no calls to are made this object has no performance overhead. 
On the other hand, it will only traverse the object graph once, so repeated naming is cheap after the first.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "ClassDef name:ObjectGraphProtoPrettyPrinter Assign FunctionDef name:__init__ arg:self arg:object_graph_proto arguments arg arg Assign Assign FunctionDef name:node_names arg:self arguments arg If Compare Return return:yes Assign Assign Assign Call While Assign Call Assign For If Compare Assign Call Assign For Call Assign Call For Call For Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_type_spec_from_value", + "source_code": "def _type_spec_from_value(value) -> TypeSpec:\n if isinstance(value, core_types.Symbol):\n return trace_type.from_value(value)\n if isinstance(value, composite_tensor.CompositeTensor):\n return value._type_spec\n if isinstance(value, list) and value:\n subspecs = [_type_spec_from_value(v) for v in value]\n if isinstance(subspecs[0], BatchableTypeSpec):\n merged_subspec = subspecs[0].most_specific_common_supertype(subspecs[1:])\n if merged_subspec is not None:\n return merged_subspec._batch(len(subspecs))\n for entry in reversed(_TYPE_CONVERSION_FUNCTION_REGISTRY):\n type_object, converter_fn, allow_subclass = entry\n if type(value) is type_object or (allow_subclass and isinstance(value, type_object)):\n return converter_fn(value)\n return None", + "docstring": "Returns a that represents the given .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:_type_spec_from_value arg:value arguments arg If Call Return return:yes Call If Call Return return:yes If BoolOp Call Assign Call If Call Assign Call If Compare Return return:yes Call Call For Call Assign If BoolOp Compare Call BoolOp Call Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "get_v2_names", + "source_code": "def get_v2_names(symbol: Any) -> Sequence[str]:\n names_v2 = []\n tensorflow_api_attr = API_ATTRS[TENSORFLOW_API_NAME].names\n keras_api_attr = API_ATTRS[KERAS_API_NAME].names\n if not hasattr(symbol, '__dict__'):\n return names_v2\n if tensorflow_api_attr in symbol.__dict__:\n names_v2.extend(getattr(symbol, tensorflow_api_attr))\n if keras_api_attr in symbol.__dict__:\n names_v2.extend(getattr(symbol, keras_api_attr))\n return names_v2", + "docstring": "Get a list of TF 2.0 names for this symbol. Args: symbol: symbol to get API names for. Returns: List of all API names for this symbol.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py", + "ast_data": "FunctionDef name:get_v2_names arg:symbol arguments arg Assign Assign Assign If Call Return return:yes If Compare Call Call If Compare Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_xdg_cache_dir", + "source_code": "def _get_xdg_cache_dir():\n return os.environ.get('XDG_CACHE_HOME') or str(Path.home() / '.cache')", + "docstring": "Return the XDG cache directory, according to the XDG base directory spec:", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", + "ast_data": "FunctionDef name:_get_xdg_cache_dir arguments Return return:yes BoolOp Call Call Call" + }, + { + "library": "authlib", + "name": "register_signature_method", + "source_code": "@classmethod\ndef register_signature_method(cls, name, sign):\n cls.SIGNATURE_METHODS[name] = sign", + "docstring": "Extend client signature methods. 
:param name: A string to represent signature method. :param sign: A function to generate signature. The `` method accept 2 parameters:: def custom_sign_method(client, request): # client is the instance of Client. return \"your-signed-string\" Client.register_signature_method(\"custom-name\", custom_sign_method)", + "type": "method", + "file_path": "authlib\\authlib\\oauth1\\rfc5849\\client_auth.py", + "ast_data": "FunctionDef name:register_signature_method arg:cls arg:name arg:sign arguments arg arg arg Assign" + }, + { + "library": "scrapy", + "name": "get_spec", + "source_code": "def get_spec(func: Callable[..., Any]) -> tuple[list[str], dict[str, Any]]:\n if inspect.isfunction(func) or inspect.ismethod(func):\n spec = inspect.getfullargspec(func)\n elif hasattr(func, '__call__'):\n spec = inspect.getfullargspec(func.__call__)\n else:\n raise TypeError(f'{type(func)} is not callable')\n defaults: tuple[Any, ...] = spec.defaults or ()\n firstdefault = len(spec.args) - len(defaults)\n args = spec.args[:firstdefault]\n kwargs = dict(zip(spec.args[firstdefault:], defaults))\n return (args, kwargs)", + "docstring": "Returns (args, kwargs) tuple for a function >>> import re >>> get_spec(re.match) (['pattern', 'string'], {'flags': 0}) >>> class Test: ... def __call__(self, val): ... pass ... def method(self, val, flags=0): ... pass >>> get_spec(Test) (['self', 'val'], {}) >>> get_spec(Test.method) (['self', 'val'], {'flags': 0}) >>> get_spec(Test().method) (['self', 'val'], {'flags': 0})", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\python.py", + "ast_data": "FunctionDef name:get_spec arg:func arguments arg If BoolOp Call Call Assign Call If Call Assign Call Raise Call Call BoolOp Assign Call Call Assign Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "update_scalarmappable", + "source_code": "def update_scalarmappable(self):\n if not self._set_mappable_flags():\n return\n if self._A is not None:\n if self._A.ndim > 1 and (not isinstance(self, _MeshData)):\n raise ValueError('Collections can only map rank 1 arrays')\n if np.iterable(self._alpha):\n if self._alpha.size != self._A.size:\n raise ValueError(f'Data array shape, {self._A.shape} is incompatible with alpha array shape, {self._alpha.shape}. This can occur with the deprecated behavior of the \"flat\" shading option, in which a row and/or column of the data array is dropped.')\n self._alpha = self._alpha.reshape(self._A.shape)\n self._mapped_colors = self.to_rgba(self._A, self._alpha)\n if self._face_is_mapped:\n self._facecolors = self._mapped_colors\n else:\n self._set_facecolor(self._original_facecolor)\n if self._edge_is_mapped:\n self._edgecolors = self._mapped_colors\n else:\n self._set_edgecolor(self._original_edgecolor)\n self.stale = True", + "docstring": "Update colors from the scalar mappable array, if any. 
Assign colors to edges and faces based on the array and/or colors that were directly set, as appropriate.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:update_scalarmappable arg:self arguments arg If Call Return return:no If Compare If BoolOp Compare Call Raise Call If Call If Compare Raise Call Assign Call Assign Call If Assign Call If Assign Call Assign" + }, + { + "library": "scipy", + "name": "braycurtis", + "source_code": "def braycurtis(u, v, w=None):\n u = _validate_vector(u)\n v = _validate_vector(v, dtype=np.float64)\n l1_diff = abs(u - v)\n l1_sum = abs(u + v)\n if w is not None:\n w = _validate_weights(w)\n l1_diff = w * l1_diff\n l1_sum = w * l1_sum\n return l1_diff.sum() / l1_sum.sum()", + "docstring": "Compute the Bray-Curtis distance between two 1-D arrays. Bray-Curtis distance is defined as .. math:: \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|} The Bray-Curtis distance is in the range [0, 1] if all coordinates are positive, and is undefined if the inputs are of length zero. Parameters ---------- u : (N,) array_like Input array. v : (N,) array_like Input array. w : (N,) array_like, optional The weights for each value in and . Default is None, which gives each value a weight of 1.0 Returns ------- braycurtis : double The Bray-Curtis distance between 1-D arrays and . Examples -------- >>> from scipy.spatial import distance >>> distance.braycurtis([1, 0, 0], [0, 1, 0]) 1.0 >>> distance.braycurtis([1, 1, 0], [0, 1, 0]) 0.33333333333333331", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\distance.py", + "ast_data": "FunctionDef name:braycurtis arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Assign Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "device", + "source_code": "def device(self) -> torch.device:\n return self._intrinsics.device", + "docstring": "Return the device for camera buffers. Returns: Device type", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, input: Tensor, params: Optional[List[ParamItem]]=None, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n if len(input.shape) != 5:\n raise AssertionError(f'Input must be a 5-dim tensor. 
Got {input.shape}.')\n if params is None:\n self._params = self.forward_parameters(input.shape)\n params = self._params\n output = self.transform_inputs(input, params, extra_args=extra_args)\n return output", + "docstring": "Define the video computation performed.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\video.py", + "ast_data": "FunctionDef name:forward arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Compare Call Raise Call If Compare Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "django", + "name": "num_geom", + "source_code": "@property\ndef num_geom(self):\n return capi.get_num_geoms(self.ptr)", + "docstring": "Return the number of geometries in the Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:num_geom arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_add_a_b", + "source_code": "def _add_a_b(tests):\n for d in tests:\n for k, v in zip(['a', 'b'], d.get('bracket', [])):\n d[k] = v", + "docstring": "Add \"a\" and \"b\" keys to each test from the \"bracket\" value", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:_add_a_b arg:tests arguments arg For For Call Call Assign" + }, + { + "library": "pytorch", + "name": "_traced_graph_from_model", + "source_code": "def _traced_graph_from_model(model: torch.nn.Module | torch.jit.ScriptModule, args: tuple[Any, ...], kwargs: Mapping[str, Any], export_options: _experimental.ExportOptions) -> _C.Graph:\n training = export_options.training\n verbose = export_options.verbose\n with utils.exporter_context(model, training, verbose):\n export_inputs = _prepare_input_for_export(args, kwargs)\n model = utils._pre_trace_quant_model(model, export_inputs)\n jit_graph, _, _, _ = utils._create_jit_graph(model, export_inputs)\n return jit_graph", + "docstring": "As part of the ONNX export steps, create a traced JIT graph from a PyTorch model. Args: model: See :func:. args: See :func:. kwargs: See :func:. export_options: See :func:. Returns: jit_graph (_C.Graph): A traced JIT graph.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:_traced_graph_from_model arg:model arg:args arg:kwargs arg:export_options arguments arg arg arg arg Assign Assign With Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "internal_convert_n_to_tensor", + "source_code": "def internal_convert_n_to_tensor(values, dtype=None, name=None, as_ref=False, preferred_dtype=None, ctx=None) -> list[Union[EagerTensor, SymbolicTensor]]:\n if not isinstance(values, collections_abc.Sequence):\n raise TypeError('values must be a sequence.')\n ret = []\n for i, value in enumerate(values):\n n = None if name is None else '%s_%d' % (name, i)\n ret.append(convert_to_tensor(value, dtype=dtype, name=n, as_ref=as_ref, preferred_dtype=preferred_dtype))\n return ret", + "docstring": "Converts to a list of objects. Args: values: A list of objects that can be consumed by . dtype: (Optional.) The required of the returned objects. name: (Optional.) A name prefix to used when a new is created, in which case element will be given the name . as_ref: True if the caller wants the results as ref tensors. preferred_dtype: Optional element type for the returned tensors, used when dtype is None. 
In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to is not possible, this argument has no effect. ctx: Unused. Present for API backwards compatibility. Returns: A list of and/or objects. Raises: TypeError: If no conversion function is registered for an element in . RuntimeError: If a registered conversion function returns an invalid value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:internal_convert_n_to_tensor arg:values arg:dtype arg:name arg:as_ref arg:preferred_dtype arg:ctx arguments arg arg arg arg arg arg If Call Raise Call Assign For Call Assign Compare Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "getLogger", + "source_code": "def getLogger(name: str) -> SphinxLoggerAdapter:\n logger = logging.getLogger(NAMESPACE + '.' + name)\n logger.disabled = False\n return SphinxLoggerAdapter(logger, {})", + "docstring": "Get logger wrapped by :class:. Sphinx logger always uses `` namespace to be independent from settings of root logger. It ensures logging is consistent even if a third-party extension or imported application resets logger settings. Example usage:: >>> from sphinx.util import logging >>> logger = logging.getLogger(__name__) >>> logger.info('Hello, this is an extension!') Hello, this is an extension!", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\logging.py", + "ast_data": "FunctionDef name:getLogger arg:name arguments arg Assign Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "install_guard", + "source_code": "def install_guard(*guards, skip=0):\n from torch._guards import TracingContext\n collect_debug_stack = guards_log.isEnabledFor(logging.DEBUG) or verbose_guards_log.isEnabledFor(logging.DEBUG)\n add = TracingContext.get().guards_context.dynamo_guards.add\n for guard in guards:\n assert isinstance(guard, Guard)\n add(guard, collect_debug_stack=collect_debug_stack, skip=skip + 1)", + "docstring": "Add dynamo guards to the current tracing context. Args: guards: guard(s) to add skip: number of stack frames to ignore for debug stack trace", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\guards.py", + "ast_data": "FunctionDef name:install_guard arguments arg arg Assign BoolOp Call Call Assign Call For Call Call" + }, + { + "library": "kornia", + "name": "max_blur_pool2d", + "source_code": "def max_blur_pool2d(input: Tensor, kernel_size: tuple[int, int] | int, stride: int=2, max_pool_size: int=2, ceil_mode: bool=False) -> Tensor:\n KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n kernel = get_pascal_kernel_2d(kernel_size, norm=True, device=input.device, dtype=input.dtype).repeat((input.shape[1], 1, 1, 1))\n return _max_blur_pool_by_kernel2d(input, kernel, stride, max_pool_size, ceil_mode)", + "docstring": "Compute pools and blurs and downsample a given feature map. .. image:: _static/img/max_blur_pool2d.png See :class: for details. Args: input: tensor to apply operation to. kernel_size: the kernel size for max pooling. stride: stride for pooling. max_pool_size: the kernel size for max pooling. ceil_mode: should be true to match output size of conv2d with same kernel size. .. note:: This function is tested against .. note:: See a working example __. 
Examples: >>> input = torch.eye(5)[None, None] >>> max_blur_pool2d(input, 3) tensor([[[[0.5625, 0.3125], [0.3125, 0.8750]]]])", + "type": "function", + "file_path": "kornia\\kornia\\filters\\blur_pool.py", + "ast_data": "FunctionDef name:max_blur_pool2d arg:input arg:kernel_size arg:stride arg:max_pool_size arg:ceil_mode arguments arg arg arg arg arg Call Assign Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "headers", + "source_code": "@property\ndef headers(self) -> Sequence[str]:\n if self.with_counts:\n return ['Non-Null Count', 'Dtype']\n return ['Dtype']", + "docstring": "Headers names of the columns in verbose table.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:headers arg:self arguments arg If Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "wrap_cpp_class", + "source_code": "def wrap_cpp_class(cpp_class):\n return torch.jit.RecursiveScriptClass(cpp_class)", + "docstring": "Wrap this torch._C.Object in a Python RecursiveScriptClass.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\_recursive.py", + "ast_data": "FunctionDef name:wrap_cpp_class arg:cpp_class arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "fp16_compress_wrapper", + "source_code": "def fp16_compress_wrapper(hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:\n\n def fp16_compress_wrapper_hook(hook_state, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n bucket.set_buffer(bucket.buffer().to(torch.float16))\n fut = hook(hook_state, bucket)\n\n def decompress(fut):\n decompressed_tensor = bucket.buffer()\n decompressed_tensor.copy_(fut.value())\n return decompressed_tensor\n return fut.then(decompress)\n return fp16_compress_wrapper_hook", + "docstring": "Cast input tensor to ``. Example:: >>> # xdoctest: +SKIP >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, start_powerSGD_iter=10) >>> ddp_model.register_comm_hook(state, fp16_compress_wrapper(powerSGD_hook))", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\default_hooks.py", + "ast_data": "FunctionDef name:fp16_compress_wrapper arg:hook arguments arg FunctionDef name:fp16_compress_wrapper_hook arg:hook_state arg:bucket arguments arg arg Call Call Call Assign Call FunctionDef name:decompress arg:fut arguments arg Assign Call Call Call Return return:yes Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "clf", + "source_code": "def clf(self, keep_observers=False):\n return self.clear(keep_observers=keep_observers)", + "docstring": "[*Discouraged*] Alias for the method. .. admonition:: Discouraged The use of `` instead. 
Parameters ---------- keep_observers : bool, default: False Set *keep_observers* to True if, for example, a gui widget is tracking the Axes in the figure.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:clf arg:self arg:keep_observers arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "type", + "source_code": "@property\ndef type(self):\n return capi.get_field_type(self.ptr)", + "docstring": "Return the OGR type of this Field.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\field.py", + "ast_data": "FunctionDef name:type arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_pre_load_state_dict_hook", + "source_code": "@staticmethod\ndef _pre_load_state_dict_hook(module: nn.Module, state_dict: dict[str, Any], prefix: str, *args: Any) -> None:\n _replace_by_prefix(state_dict, prefix, prefix + f'{_CHECKPOINT_PREFIX}')", + "docstring": "` is called before ``, it will add back the module prefix so that non-checkpointed modules can be loaded into checkpoint_wrapper modules properly.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py", + "ast_data": "FunctionDef name:_pre_load_state_dict_hook arg:module arg:state_dict arg:prefix arguments arg arg arg arg Call" + }, + { + "library": "cherrypy", + "name": "output", + "source_code": "def output(self):\n return list(self.encode_header_items(self.items()))", + "docstring": "Transform self into a list of (name, value) tuples.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:output arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "multi_margin_loss", + "source_code": "def multi_margin_loss(input: Tensor, target: Tensor, p: int=1, margin: float=1.0, weight: Optional[Tensor]=None, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n if has_torch_function_variadic(input, target, weight):\n return handle_torch_function(multi_margin_loss, (input, target, weight), input, target, p=p, margin=margin, weight=weight, size_average=size_average, reduce=reduce, reduction=reduction)\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n if p != 1 and p != 2:\n raise ValueError('only p == 1 and p == 2 supported')\n if weight is not None:\n if weight.dim() != 1:\n raise ValueError('weight must be one-dimensional')\n return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, reduction_enum)", + "docstring": "Compute the multi margin loss, with optional weighting. See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. p (int, optional): Has a default value of 1. 1 and 2 are the only supported values. margin (float, optional): Margin for multi margin loss. Has a default value of 1. weight (Tensor, optional): Weights for each sample. Default: None. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. 
Returns: Tensor: Multi margin loss (optionally weighted).", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:multi_margin_loss arg:input arg:target arg:p arg:margin arg:weight arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call If BoolOp Compare Compare Raise Call If Compare If Compare Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "in_top_k_v2", + "source_code": "@tf_export('math.in_top_k', 'nn.in_top_k', v1=[])\n@dispatch.add_dispatch_support\ndef in_top_k_v2(targets, predictions, k, name=None):\n return in_top_k(predictions, targets, k, name)", + "docstring": "Outputs whether the targets are in the top predictions. This outputs a bool array, an entry is if the prediction for the target class is finite (not inf, -inf, or nan) and among the top predictions among all predictions for example . does not have to be normalized. Note that the behavior of differs from the op in its handling of ties; if multiple classes have the same prediction value and straddle the top- boundary, all of those classes are considered to be in the top . >>> target = tf.constant([0, 1, 3]) >>> pred = tf.constant([ ... [1.2, -0.3, 2.8, 5.2], ... [0.1, 0.0, 0.0, 0.0], ... [0.0, 0.5, 0.3, 0.3]], ... dtype=tf.float32) >>> print(tf.math.in_top_k(target, pred, 2)) tf.Tensor([False True True], shape=(3,), dtype=bool) Args: targets: A vector of class ids. Must be or . predictions: A x tensor of type . k: An . The parameter to specify search space. name: A name for the operation (optional). Returns: A with the same shape of with type of . Each element specifies if the target falls into top-k predictions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:in_top_k_v2 arg:targets arg:predictions arg:k arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "preprocess_data", + "source_code": "def preprocess_data(data) -> io.StringIO | io.BytesIO:\n if isinstance(data, str):\n data = io.StringIO(data)\n elif isinstance(data, bytes):\n data = io.BytesIO(data)\n return data", + "docstring": "Convert extracted raw data. This method will return underlying data of extracted XML content. The data either has a attribute (e.g. a file object or a StringIO/BytesIO) or is a string or bytes that is an XML document.", + "type": "function", + "file_path": "pandas\\pandas\\io\\xml.py", + "ast_data": "FunctionDef name:preprocess_data arg:data arguments arg If Call Assign Call If Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_alpha_vec", + "source_code": "@staticmethod\ndef _get_alpha_vec(x, y, tris_pts):\n ndim = tris_pts.ndim - 2\n a = tris_pts[:, 1, :] - tris_pts[:, 0, :]\n b = tris_pts[:, 2, :] - tris_pts[:, 0, :]\n abT = np.stack([a, b], axis=-1)\n ab = _transpose_vectorized(abT)\n OM = np.stack([x, y], axis=1) - tris_pts[:, 0, :]\n metric = ab @ abT\n metric_inv = _pseudo_inv22sym_vectorized(metric)\n Covar = ab @ _transpose_vectorized(np.expand_dims(OM, ndim))\n ksi = metric_inv @ Covar\n alpha = _to_matrix_vectorized([[1 - ksi[:, 0, 0] - ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])\n return alpha", + "docstring": "Fast (vectorized) function to compute barycentric coordinates alpha. 
Parameters ---------- x, y : array-like of dim 1 (shape (nx,)) Coordinates of the points whose points barycentric coordinates are requested. tris_pts : array like of dim 3 (shape: (nx, 3, 2)) Coordinates of the containing triangles apexes. Returns ------- array of dim 2 (shape (nx, 3)) Barycentric coordinates of the points inside the containing triangles.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:_get_alpha_vec arg:x arg:y arg:tris_pts arguments arg arg arg Assign Assign Assign Assign Call Assign Call Assign Call Assign Assign Call Assign Call Call Assign Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "generate_decl_wrapper", + "source_code": "def generate_decl_wrapper(name, return_type, argnames, argtypes, accelerate):\n if name in WRAPPED_FUNCS:\n return ''\n if accelerate and name in USE_OLD_ACCELERATE:\n return ''\n c_return_type = C_TYPES[return_type]\n c_argtypes = [C_TYPES[t] for t in argtypes]\n param_list = ', '.join((f'{t} *{n}' for t, n in zip(c_argtypes, argnames)))\n argnames = ', '.join(argnames)\n blas_macro, blas_name = get_blas_macro_and_name(name, accelerate)\n return f'\\n{c_return_type} {blas_macro}({blas_name})({param_list});\\n{c_return_type} F_FUNC({name},{name.upper()})({param_list}){{\\n return {blas_macro}({blas_name})({argnames});\\n}}\\n'", + "docstring": "Create wrapper function declaration. Wrapper has symbol and wraps the BLAS/LAPACK function (by default: ).", + "type": "function", + "file_path": "scipy\\scipy\\_build_utils\\_generate_blas_wrapper.py", + "ast_data": "FunctionDef name:generate_decl_wrapper arg:name arg:return_type arg:argnames arg:argtypes arg:accelerate arguments arg arg arg arg arg If Compare Return return:yes If BoolOp Compare Return return:yes Assign Assign Assign Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "contains_saved_model", + "source_code": "@tf_export('saved_model.contains_saved_model', v1=[])\ndef contains_saved_model(export_dir):\n if isinstance(export_dir, os.PathLike):\n export_dir = os.fspath(export_dir)\n return maybe_saved_model_directory(export_dir)", + "docstring": "Checks whether the provided export directory could contain a SavedModel. Note that the method does not load any data by itself. If the method returns , the export directory definitely does not contain a SavedModel. If the method returns , the export directory may contain a SavedModel but provides no guarantee that it can be loaded. Args: export_dir: Absolute path to possible export location. For example, '/my/foo/model'. Returns: True if the export directory contains SavedModel files, False otherwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py", + "ast_data": "FunctionDef name:contains_saved_model arg:export_dir arguments arg If Call Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_subtype_with_str", + "source_code": "@property\ndef _subtype_with_str(self):\n if isinstance(self.fill_value, str):\n return type(self.fill_value)\n return self.subtype", + "docstring": "Whether the SparseDtype's subtype should be considered ``, we need to be more specific, we need the actual underlying type. 
Returns ------- >>> SparseDtype(int, 1)._subtype_with_str dtype('int64') >>> SparseDtype(object, 1)._subtype_with_str dtype('O') >>> dtype = SparseDtype(str, \"\") >>> dtype.subtype dtype('O') >>> dtype._subtype_with_str", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:_subtype_with_str arg:self arguments arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "never_record_summaries", + "source_code": "def never_record_summaries():\n return record_if(False)", + "docstring": "Sets the should_record_summaries Tensor to always false.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:never_record_summaries arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "flush", + "source_code": "def flush(self) -> None:\n raise NotImplementedError", + "docstring": "Flush the generated kernel and python wrapper code to the source code file.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:flush arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "add_debug_tensor_watch", + "source_code": "def add_debug_tensor_watch(run_options, node_name, output_slot=0, debug_ops='DebugIdentity', debug_urls=None, tolerate_debug_op_creation_failures=False, global_step=-1):\n watch_opts = run_options.debug_options.debug_tensor_watch_opts\n run_options.debug_options.global_step = global_step\n watch = watch_opts.add()\n watch.tolerate_debug_op_creation_failures = tolerate_debug_op_creation_failures\n watch.node_name = node_name\n watch.output_slot = output_slot\n if isinstance(debug_ops, str):\n debug_ops = [debug_ops]\n watch.debug_ops.extend(debug_ops)\n if debug_urls:\n if isinstance(debug_urls, str):\n debug_urls = [debug_urls]\n watch.debug_urls.extend(debug_urls)", + "docstring": "Add watch on a to . N.B.: 1. Under certain circumstances, the may not get actually watched (e.g., if the node of the is constant-folded during runtime). 2. For debugging purposes, the attribute of all s in the graph are set to 1 to prevent any node from being executed multiple times concurrently. This change does not affect subsequent non-debugged runs of the same s. Args: run_options: An instance of to be modified. node_name: () name of the node to watch. output_slot: () output slot index of the tensor from the watched node. debug_ops: ( or of ) name(s) of the debug op(s). Can be a of or a single . The latter case is equivalent to a of with only one element. For debug op types with customizable attributes, each debug op string can optionally contain a list of attribute names, in the syntax of: debug_op_name(attr_name_1=attr_value_1;attr_name_2=attr_value_2;...) debug_urls: ( or of ) URL(s) to send debug values to, e.g., , . tolerate_debug_op_creation_failures: () Whether to tolerate debug op creation failures by not throwing exceptions. 
global_step: () Optional global_step count for this debug tensor watch.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_utils.py", + "ast_data": "FunctionDef name:add_debug_tensor_watch arg:run_options arg:node_name arg:output_slot arg:debug_ops arg:debug_urls arg:tolerate_debug_op_creation_failures arg:global_step arguments arg arg arg arg arg arg arg Assign Assign Assign Call Assign Assign Assign If Call Assign Call If If Call Assign Call" + }, + { + "library": "seaborn", + "name": "has_xy_data", + "source_code": "@property\ndef has_xy_data(self):\n return bool({'x', 'y'} & set(self.variables))", + "docstring": "Return True at least one of x or y is defined.", + "type": "method", + "file_path": "seaborn\\seaborn\\distributions.py", + "ast_data": "FunctionDef name:has_xy_data arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "convert_saved_model", + "source_code": "@convert_phase(Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_SAVED_MODEL)\ndef convert_saved_model(**kwargs):\n model_flags = build_model_flags(**kwargs)\n conversion_flags = build_conversion_flags(**kwargs)\n data = convert(model_flags, conversion_flags, input_data_str=None, debug_info_str=None)\n return data", + "docstring": "Converts a SavedModel using TF Lite converter.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py", + "ast_data": "FunctionDef name:convert_saved_model arguments arg Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "deref", + "source_code": "def deref(self):\n return self._wrapped", + "docstring": "Returns the referenced object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\object_identity.py", + "ast_data": "FunctionDef name:deref arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "map_placements_after_reduction", + "source_code": "def map_placements_after_reduction(placements: tuple[Placement, ...], reduction_dims: list[int], reduction_dims_map: list[int], reduction_op: ReductionOpType) -> tuple[Placement, ...]:\n new_placements: list[Placement] = []\n for placement in placements:\n if isinstance(placement, (Replicate, Partial)):\n new_placements.append(placement)\n else:\n assert isinstance(placement, Shard)\n shard_dim = placement.dim\n new_shard_dim = reduction_dims_map[shard_dim]\n if new_shard_dim == -1 or shard_dim in reduction_dims:\n new_placements.append(get_placement_from_reduction_op(reduction_op))\n else:\n new_placements.append(Shard(new_shard_dim))\n return tuple(new_placements)", + "docstring": "Map each placement based on the output shape after reduction.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_math_ops.py", + "ast_data": "FunctionDef name:map_placements_after_reduction arg:placements arg:reduction_dims arg:reduction_dims_map arg:reduction_op arguments arg arg arg arg For If Call Call Call Assign Assign If BoolOp Compare Compare Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_artifact_urls", + "source_code": "def _get_artifact_urls(prefix: str, workflow_run_id: int) -> dict[Path, str]:\n response = requests.get(f'{PYTORCH_REPO}/actions/runs/{workflow_run_id}/artifacts?per_page=100', headers=_get_request_headers())\n artifacts = response.json()['artifacts']\n while 'next' in response.links.keys():\n response = 
requests.get(response.links['next']['url'], headers=_get_request_headers())\n artifacts.extend(response.json()['artifacts'])\n artifact_urls = {}\n for artifact in artifacts:\n if artifact['name'].startswith(prefix):\n artifact_urls[Path(artifact['name'])] = artifact['archive_download_url']\n return artifact_urls", + "docstring": "Get all workflow artifacts with 'test-report' in the name.", + "type": "function", + "file_path": "pytorch\\tools\\stats\\upload_stats_lib.py", + "ast_data": "FunctionDef name:_get_artifact_urls arg:prefix arg:workflow_run_id arguments arg arg Assign Call Call Assign Call While Compare Call Assign Call Call Call Call Assign For If Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_cleanup_registry_based_on_opset_version", + "source_code": "def _cleanup_registry_based_on_opset_version(self) -> None:\n cleaned_functions = {}\n for target_or_name, decomps in self.functions.items():\n decomps = [d for d in decomps if d.opset_introduced <= self.opset_version]\n if decomps:\n max_opset = max((d.opset_introduced for d in decomps))\n cleaned_functions[target_or_name] = [d for d in decomps if d.opset_introduced == max_opset]\n self.functions = cleaned_functions", + "docstring": "Pick the implementation with the highest opset version valid until the current opset version.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_registration.py", + "ast_data": "FunctionDef name:_cleanup_registry_based_on_opset_version arg:self arguments arg Assign For Call Assign Compare If Assign Call Assign Compare Assign" + }, + { + "library": "tensorflow", + "name": "_sort_or_argsort", + "source_code": "def _sort_or_argsort(values, axis, direction, return_argsort):\n if direction not in _SORT_IMPL:\n valid_directions = ', '.join(sorted(_SORT_IMPL.keys()))\n raise ValueError(f'Argument `direction` should be one of {valid_directions}. Received: direction={direction}')\n axis = framework_ops.convert_to_tensor(axis, name='axis')\n axis_static = tensor_util.constant_value(axis)\n if axis.shape.ndims not in (None, 0) or axis_static is None:\n raise ValueError(f'Argument `axis` must be a constant scalar. Received: axis={axis}.')\n axis_static = int(axis_static)\n values = framework_ops.convert_to_tensor(values, name='values')\n return _SORT_IMPL[direction](values, axis_static, return_argsort)", + "docstring": "Internal sort/argsort implementation. Args: values: The input values. axis: The axis along which to sort. direction: 'ASCENDING' or 'DESCENDING'. return_argsort: Whether to return the argsort result. Returns: Either the sorted values, or the indices of the sorted values in the original tensor. See the and docstrings. 
Raises: ValueError: If axis is not a constant scalar, or the direction is invalid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sort_ops.py", + "ast_data": "FunctionDef name:_sort_or_argsort arg:values arg:axis arg:direction arg:return_argsort arguments arg arg arg arg If Compare Assign Call Call Call Raise Call Assign Call Assign Call If BoolOp Compare Compare Raise Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "batched_forward", + "source_code": "def batched_forward(model: Module, data: Tensor, device: Device, batch_size: int=128, **kwargs: Dict[str, Any]) -> Tensor:\n model_dev = model.to(device)\n B: int = len(data)\n bs: int = batch_size\n if B > batch_size:\n out_list = []\n n_batches = int(B // bs + 1)\n for batch_idx in range(n_batches):\n st = batch_idx * bs\n if batch_idx == n_batches - 1:\n if (batch_idx + 1) * bs > B:\n end = B\n else:\n end = (batch_idx + 1) * bs\n else:\n end = (batch_idx + 1) * bs\n if st >= end:\n continue\n out_list.append(model_dev(data[st:end].to(device), **kwargs))\n out = concatenate(out_list, 0)\n return out.to(data.device)\n return model(data, **kwargs)", + "docstring": "Run the forward in micro-batches. When the just model.forward(data) does not fit into device memory, e.g. on laptop GPU. In the end, it transfers the output to the device of the input data tensor. E.g. running HardNet on 8000x1x32x32 tensor. Args: model: Any torch model, which outputs a single tensor as an output. data: Input data of Bx(Any) shape. device: which device should we run on. batch_size: \"micro-batch\" size. **kwargs: any other arguments, which accepts model. Returns: output of the model. Example: >>> patches = torch.rand(8000, 1, 32, 32) >>> sift = kornia.feature.SIFTDescriptor(32) >>> desc_batched = batched_forward(sift, patches, torch.device('cpu'), 128) >>> desc = sift(patches) >>> assert torch.allclose(desc, desc_batched)", + "type": "function", + "file_path": "kornia\\kornia\\utils\\memory.py", + "ast_data": "FunctionDef name:batched_forward arg:model arg:data arg:device arg:batch_size arguments arg arg arg arg arg Assign Call Call If Compare Assign Assign Call For Call Assign If Compare If Compare Assign Assign Assign If Compare Call Call Call Assign Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "value_counts", + "source_code": "def value_counts(self, normalize: bool=False, sort: bool=True, ascending: bool=False, bins=None, dropna: bool=True) -> Series:\n return algorithms.value_counts_internal(self, sort=sort, ascending=ascending, normalize=normalize, bins=bins, dropna=dropna)", + "docstring": "Return a Series containing counts of unique values. The resulting object will be in descending order so that the first element is the most frequently-occurring element. Excludes NA values by default. Parameters ---------- normalize : bool, default False If True then the object returned will contain the relative frequencies of the unique values. sort : bool, default True Sort by frequencies when True. Preserve the order of the data when False. ascending : bool, default False Sort in ascending order. bins : int, optional Rather than count values, group them into half-open bins, a convenience for `normalizeTruedropnaFalse` doesn't have the same categories. 
>>> df = pd.DataFrame({\"a\": [1], \"b\": [\"2\"], \"c\": [3], \"d\": [3]}) >>> df = df.astype({\"a\": \"category\", \"c\": \"category\", \"d\": \"category\"}) >>> df a b c d 0 1 2 3 3 >>> df.dtypes a category b object c category d category dtype: object >>> df.dtypes.value_counts() category 2 category 1 object 1 Name: count, dtype: int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "FunctionDef name:value_counts arg:self arg:normalize arg:sort arg:ascending arg:bins arg:dropna arguments arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_color", + "source_code": "def set_color(self, color):\n axis = self._axis_map[self._orientation]\n axis.set_tick_params(colors=color)\n for spine in self.spines.values():\n if spine.axis is axis:\n spine.set_color(color)\n axis.label.set_color(color)", + "docstring": "Change the color of the secondary Axes and all decorators. Parameters ---------- color : :mpltype:", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py", + "ast_data": "FunctionDef name:set_color arg:self arg:color arguments arg arg Assign Call For Call If Compare Call Call" + }, + { + "library": "scipy", + "name": "shape", + "source_code": "def shape(self):\n return self.data.shape", + "docstring": "Returns the shape tuple of the data variable. This is a read-only attribute and can not be modified in the same manner of other numpy arrays.", + "type": "method", + "file_path": "scipy\\scipy\\io\\_netcdf.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "validate_token", + "source_code": "def validate_token(self, token, scopes, request):\n if not token:\n raise InvalidTokenError(realm=self.realm, extra_attributes=self.extra_attributes)\n if token.is_expired():\n raise InvalidTokenError(realm=self.realm, extra_attributes=self.extra_attributes)\n if token.is_revoked():\n raise InvalidTokenError(realm=self.realm, extra_attributes=self.extra_attributes)\n if self.scope_insufficient(token.get_scope(), scopes):\n raise InsufficientScopeError()", + "docstring": "Check if token is active and matches the requested scopes.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6750\\validator.py", + "ast_data": "FunctionDef name:validate_token arg:self arg:token arg:scopes arg:request arguments arg arg arg arg If Raise Call If Call Raise Call If Call Raise Call If Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "assert_same_float_dtype", + "source_code": "@tf_export('debugging.assert_same_float_dtype', v1=['debugging.assert_same_float_dtype', 'assert_same_float_dtype'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('assert_same_float_dtype')\ndef assert_same_float_dtype(tensors=None, dtype=None):\n if tensors:\n dtype = _assert_same_base_type(tensors, dtype)\n if not dtype:\n dtype = dtypes.float32\n elif not dtype.is_floating:\n raise ValueError('Expected floating point type, got %s.' % dtype)\n return dtype", + "docstring": "Validate and return float type based on and . For ops such as matrix multiplication, inputs and weights must be of the same float type. This function validates that all are the same type, validates that type is (if supplied), and returns the type. Type must be a floating point type. If neither nor is supplied, the function will return . Args: tensors: Tensors of input values. Can include elements, which will be ignored. 
dtype: Expected type. Returns: Validated type. Raises: ValueError: if neither nor is supplied, or result is not float, or the common type of the inputs is not a floating point type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:assert_same_float_dtype arg:tensors arg:dtype arguments arg arg If Assign Call If Assign If Raise Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "prepare_planning_info", + "source_code": "def prepare_planning_info(nodes: list[BaseSchedulerNode], name_to_buf: dict[str, SchedulerBuffer], name_to_fused_node: dict[str, BaseSchedulerNode], graph_inputs: OrderedSet[str], graph_outputs: OrderedSet[str]) -> tuple[int, dict[str, FreeableInputBuffer]]:\n name_to_freeable_input_buf = get_freeable_input_buf(nodes, graph_inputs)\n assign_memory_planning_info_for_scheduler_buffers(nodes, name_to_buf)\n assign_memory_planning_info_for_scheduler_nodes(nodes, name_to_fused_node, name_to_buf, name_to_freeable_input_buf)\n estimated_peak_memory, _ = estimate_peak_memory(nodes, name_to_freeable_input_buf, graph_outputs)\n return (estimated_peak_memory, name_to_freeable_input_buf)", + "docstring": "Prepare planning info. As nodes are scheduled one at a time, these help keep track of when a buffer can be freed, and when a node can be scheduled Returns: int: peak memory estimation dict[str, FreeableInputBuffer]: name to freeable input buffer", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\memory.py", + "ast_data": "FunctionDef name:prepare_planning_info arg:nodes arg:name_to_buf arg:name_to_fused_node arg:graph_inputs arg:graph_outputs arguments arg arg arg arg arg Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "ids", + "source_code": "@property\ndef ids(self) -> Index:\n return self.data.columns", + "docstring": "Column names. 
Returns ------- ids : Index DataFrame's column names.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:ids arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "SigmoidTransform", + "source_code": "class SigmoidTransform(Transform):\n domain = constraints.real\n codomain = constraints.unit_interval\n bijective = True\n sign = +1\n\n def __eq__(self, other):\n return isinstance(other, SigmoidTransform)\n\n def _call(self, x):\n return _clipped_sigmoid(x)\n\n def _inverse(self, y):\n finfo = torch.finfo(y.dtype)\n y = y.clamp(min=finfo.tiny, max=1.0 - finfo.eps)\n return y.log() - (-y).log1p()\n\n def log_abs_det_jacobian(self, x, y):\n return -F.softplus(-x) - F.softplus(x)", + "docstring": "Transform via the mapping :math: and :math:.", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\transforms.py", + "ast_data": "ClassDef name:SigmoidTransform Assign Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Assign Call Assign Call Return return:yes Call Call FunctionDef name:log_abs_det_jacobian arg:self arg:x arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "count_tangents", + "source_code": "def count_tangents(fx_g: torch.fx.GraphModule) -> int:\n\n def is_saved_tensor(x: Node) -> bool:\n return 'tangents' not in x.name and 'bwd_seed' not in x.name and ('bwd_base_offset' not in x.name) and ('bwd_rng_state' not in x.name)\n arg_count = 0\n static_arg_idxs = []\n for n in fx_g.graph.nodes:\n if n.op == 'placeholder':\n if is_saved_tensor(n):\n static_arg_idxs.append(arg_count)\n arg_count += 1\n assert static_arg_idxs == list(range(len(static_arg_idxs)))\n return len(static_arg_idxs)", + "docstring": "Infers which inputs are static for a backwards graph", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "FunctionDef name:count_tangents arg:fx_g arguments arg FunctionDef name:is_saved_tensor arg:x arguments arg Return return:yes BoolOp Compare Compare Compare Compare Assign Assign For If Compare If Call Call Compare Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_maybe_assert_dtype", + "source_code": "def _maybe_assert_dtype(self, x):\n if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:\n raise TypeError('Input had dtype %s but expected %s.' % (self.dtype, x.dtype))", + "docstring": "Helper to check dtype when self.dtype is known.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", + "ast_data": "FunctionDef name:_maybe_assert_dtype arg:self arg:x arguments arg arg If BoolOp Compare Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "shortcut_string_merge", + "source_code": "def shortcut_string_merge(self, node_def):\n device = node_def.device or ''\n merge_key = (self._spec, device)\n result = _string_merge_cache.get(merge_key)\n if result is None:\n result = self.__call__(node_def).to_string()\n _string_merge_cache[merge_key] = result\n return result", + "docstring": "Merge a node def without materializing a full DeviceSpec object. Often a device merge is invoked in order to generate a string which can be passed into the c api. 
In such a case, we can cache the node_def.device -> merge_result_string map, and in most cases avoid: - Materializing a copy of self._spec (In the case of DeviceSpecV1) - Materializing a DeviceSpec for node_def.device - A DeviceSpec.merge_from invocation In practice the cache hit rate for this function is very high, because the number of invocations when iterating through the device stack is much larger than the number of devices. Args: node_def: An Operation (or Operation-like) to merge device constraints with self._spec Returns: A string containing the merged device specification.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\device.py", + "ast_data": "FunctionDef name:shortcut_string_merge arg:self arg:node_def arguments arg arg Assign BoolOp Assign Assign Call If Compare Assign Call Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_unmasked_polys", + "source_code": "def _get_unmasked_polys(self):\n mask = np.any(np.ma.getmaskarray(self._coordinates), axis=-1)\n mask = mask[0:-1, 0:-1] | mask[1:, 1:] | mask[0:-1, 1:] | mask[1:, 0:-1]\n arr = self.get_array()\n if arr is not None:\n arr = np.ma.getmaskarray(arr)\n if arr.ndim == 3:\n mask |= np.any(arr, axis=-1)\n elif arr.ndim == 2:\n mask |= arr\n else:\n mask |= arr.reshape(self._coordinates[:-1, :-1, :].shape[:2])\n return ~mask", + "docstring": "Get the unmasked regions using the coordinates and array", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:_get_unmasked_polys arg:self arguments arg Assign Call Call Assign Assign Call If Compare Assign Call If Compare Call If Compare Call Return return:yes" + }, + { + "library": "django", + "name": "y", + "source_code": "@y.setter\ndef y(self, value):\n self._cs.setOrdinate(1, 0, value)", + "docstring": "Set the Y component of the Point.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\point.py", + "ast_data": "FunctionDef name:y arg:self arg:value arguments arg arg Call" + }, + { + "library": "django", + "name": "auto_id", + "source_code": "@property\ndef auto_id(self):\n auto_id = self.form.auto_id\n if auto_id and '%s' in str(auto_id):\n return auto_id % self.html_name\n elif auto_id:\n return self.html_name\n return ''", + "docstring": "Calculate and return the ID attribute for this BoundField, if the associated Form has specified auto_id. 
Return an empty string otherwise.", + "type": "method", + "file_path": "django\\django\\forms\\boundfield.py", + "ast_data": "FunctionDef name:auto_id arg:self arguments arg Assign If BoolOp Compare Call Return return:yes If Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "aps02_f", + "source_code": "def aps02_f(x):\n ii = np.arange(1, 21)\n return -2 * np.sum((2 * ii - 5) ** 2 / (x - ii ** 2) ** 3)", + "docstring": "poles at x=n**2, 1st and 2nd derivatives at root are also close to 0", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:aps02_f arg:x arguments arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_zeros_slot", + "source_code": "def _zeros_slot(self, var, slot_name, op_name):\n named_slots = self._slot_dict(slot_name)\n if _var_key(var) not in named_slots:\n new_slot_variable = slot_creator.create_zeros_slot(var, op_name, copy_xla_sharding=True)\n self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n named_slots[_var_key(var)] = new_slot_variable\n return named_slots[_var_key(var)]", + "docstring": "Find or create a slot initialized with 0.0. Args: var: A object. slot_name: Name for the slot. op_name: Name to use when scoping the Variable that needs to be created for the slot. Returns: A object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_zeros_slot arg:self arg:var arg:slot_name arg:op_name arguments arg arg arg arg Assign Call If Compare Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "check_nonsquare_error", + "source_code": "@ignore_warnings\ndef check_nonsquare_error(name, estimator_orig):\n X, y = make_blobs(n_samples=20, n_features=10)\n estimator = clone(estimator_orig)\n with raises(ValueError, err_msg=f'The pairwise estimator {name} does not raise an error on non-square data'):\n estimator.fit(X, y)", + "docstring": "Test that error is thrown when non-square data provided.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py", + "ast_data": "FunctionDef name:check_nonsquare_error arg:name arg:estimator_orig arguments arg arg Assign Call Assign Call With Call Call" + }, + { + "library": "scipy", + "name": "determine_backend_multi", + "source_code": "def determine_backend_multi(dispatchables, *, domain, only=True, coerce=False, **kwargs):\n if 'dispatch_type' in kwargs:\n disp_type = kwargs.pop('dispatch_type')\n dispatchables = tuple((d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type) for d in dispatchables))\n else:\n dispatchables = tuple(dispatchables)\n if not all((isinstance(d, Dispatchable) for d in dispatchables)):\n raise TypeError('dispatchables must be instances of uarray.Dispatchable')\n if len(kwargs) != 0:\n raise TypeError(f'Received unexpected keyword arguments: {kwargs}')\n backend = _uarray.determine_backend(domain, dispatchables, coerce)\n return set_backend(backend, coerce=coerce, only=only)", + "docstring": "Set a backend supporting all `determine_backend_multimarking determine_backenddetermine_backend_multi` argument. >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): ... a, b = ex.TypeA(), ex.TypeB() ... with ua.determine_backend_multi( ... [a, b], dispatch_type=\"mark\", domain=\"ua_examples\" ... ): ... res = ex.creation_multimethod() ... 
ex.call_multimethod(res, a, b) TypeA", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py", + "ast_data": "FunctionDef name:determine_backend_multi arg:dispatchables arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Call Assign Call If Call Call Raise Call If Compare Call Raise Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_input_at", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef get_input_at(self, node_index):\n return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input')", + "docstring": "Retrieves the input tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first input node of the layer. Returns: A tensor (or list of tensors if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:get_input_at arg:self arg:node_index arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "matmul", + "source_code": "def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:\n if A is None:\n return B\n if is_sparse(A):\n return torch.sparse.mm(A, B)\n return torch.matmul(A, B)", + "docstring": "Multiply two matrices. If A is None, return B. A can be sparse or dense. B is always dense.", + "type": "function", + "file_path": "pytorch\\torch\\_linalg_utils.py", + "ast_data": "FunctionDef name:matmul arg:A arg:B arguments arg arg If Compare Return return:yes If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "export_memory_timeline", + "source_code": "def export_memory_timeline(self, path, device_str) -> None:\n times, sizes = self._coalesce_timeline(device_str)\n import json\n with open(path, 'w') as f:\n json.dump([times, sizes], f)", + "docstring": "Saves the memory timeline as [times, sizes by category] as a JSON formatted file to the given path for the given device.", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py", + "ast_data": "FunctionDef name:export_memory_timeline arg:self arg:path arg:device_str arguments arg arg arg Assign Call With Call Call" + }, + { + "library": "tensorflow", + "name": "_assert_nodes_are_present", + "source_code": "def _assert_nodes_are_present(name_to_node, nodes):\n for d in nodes:\n assert d in name_to_node, '%s is not in graph' % d", + "docstring": "Assert that nodes are present in the graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py", + "ast_data": "FunctionDef name:_assert_nodes_are_present arg:name_to_node arg:nodes arguments arg arg For Compare" + }, + { + "library": "tensorflow", + "name": "tf_buffer", + "source_code": "@tf_contextlib.contextmanager\ndef tf_buffer(data=None):\n if data:\n buf = c_api.TF_NewBufferFromString(compat.as_bytes(data))\n else:\n buf = c_api.TF_NewBuffer()\n try:\n yield buf\n finally:\n c_api.TF_DeleteBuffer(buf)", + "docstring": "Context manager that creates and deletes TF_Buffer. Example usage: with tf_buffer() as buf: # get serialized graph def into buf ... proto_data = c_api.TF_GetBuffer(buf) graph_def.ParseFromString(compat.as_bytes(proto_data)) # buf has been deleted with tf_buffer(some_string) as buf: c_api.TF_SomeFunction(buf) # buf has been deleted Args: data: An optional , , or object. 
If not None, the yielded buffer will contain this data. Yields: Created TF_Buffer", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py", + "ast_data": "FunctionDef name:tf_buffer arg:data arguments arg If Assign Call Call Assign Call Try Call" + }, + { + "library": "scipy", + "name": "QuantileTestResult", + "source_code": "@dataclass\nclass QuantileTestResult:\n statistic: float\n statistic_type: int\n pvalue: float\n _alternative: list[str] = field(repr=False)\n _x: np.ndarray = field(repr=False)\n _p: float = field(repr=False)\n\n def confidence_interval(self, confidence_level=0.95):\n alternative = self._alternative\n p = self._p\n x = np.sort(self._x)\n n = len(x)\n bd = stats.binom(n, p)\n if confidence_level <= 0 or confidence_level >= 1:\n message = '`confidence_level` must be a number between 0 and 1.'\n raise ValueError(message)\n low_index = np.nan\n high_index = np.nan\n if alternative == 'less':\n p = 1 - confidence_level\n low = -np.inf\n high_index = int(bd.isf(p))\n high = x[high_index] if high_index < n else np.nan\n elif alternative == 'greater':\n p = 1 - confidence_level\n low_index = int(bd.ppf(p)) - 1\n low = x[low_index] if low_index >= 0 else np.nan\n high = np.inf\n elif alternative == 'two-sided':\n p = (1 - confidence_level) / 2\n low_index = int(bd.ppf(p)) - 1\n low = x[low_index] if low_index >= 0 else np.nan\n high_index = int(bd.isf(p))\n high = x[high_index] if high_index < n else np.nan\n return ConfidenceInterval(low, high)", + "docstring": "Result of . Attributes ---------- statistic: float The statistic used to calculate the p-value; either `` means there is evidence that it is significantly less than the hypothesized value. pvalue : float The p-value of the hypothesis test.", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "ClassDef name:QuantileTestResult Call Call Call FunctionDef name:confidence_interval arg:self arg:confidence_level arguments arg arg Assign Assign Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Raise Call Assign Assign If Compare Assign Assign Assign Call Call Assign Compare If Compare Assign Assign Call Call Assign Compare Assign If Compare Assign Assign Call Call Assign Compare Assign Call Call Assign Compare Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "blend_soft_light", + "source_code": "def blend_soft_light(self, rgb, intensity):\n return 2 * intensity * rgb + (1 - 2 * intensity) * rgb ** 2", + "docstring": "Combine an RGB image with an intensity map using \"soft light\" blending, using the \"pegtop\" formula. Parameters ---------- rgb : An (M, N, 3) RGB array of floats ranging from 0 to 1 (color image). intensity : An (M, N, 1) array of floats ranging from 0 to 1 (grayscale image). 
Returns ------- An (M, N, 3) RGB array representing the combined images.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:blend_soft_light arg:self arg:rgb arg:intensity arguments arg arg arg Return return:yes" + }, + { + "library": "scrapy", + "name": "__init__", + "source_code": "def __init__(self, stream_id: int, request: Request, protocol: H2ClientProtocol, download_maxsize: int=0, download_warnsize: int=0) -> None:\n self.stream_id: int = stream_id\n self._request: Request = request\n self._protocol: H2ClientProtocol = protocol\n self._download_maxsize = self._request.meta.get('download_maxsize', download_maxsize)\n self._download_warnsize = self._request.meta.get('download_warnsize', download_warnsize)\n self.metadata: dict[str, Any] = {'request_content_length': 0 if self._request.body is None else len(self._request.body), 'request_sent': False, 'reached_warnsize': False, 'remaining_content_length': 0 if self._request.body is None else len(self._request.body), 'stream_closed_local': False, 'stream_closed_server': False}\n self._response: dict[str, Any] = {'body': BytesIO(), 'flow_controlled_size': 0, 'headers': Headers({})}\n\n def _cancel(_: Any) -> None:\n if self.metadata['request_sent']:\n self.reset_stream(StreamCloseReason.CANCELLED)\n else:\n self.close(StreamCloseReason.CANCELLED)\n self._deferred_response: Deferred[Response] = Deferred(_cancel)", + "docstring": "Arguments: stream_id -- Unique identifier for the stream within a single HTTP/2 connection request -- The HTTP request associated to the stream protocol -- Parent H2ClientProtocol instance", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\http2\\stream.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:stream_id arg:request arg:protocol arg:download_maxsize arg:download_warnsize arguments arg arg arg arg arg arg Assign Call Assign Call Compare Call Compare Call Call Call FunctionDef name:_cancel arg:_ arguments arg If Call Call Call" + }, + { + "library": "tensorflow", + "name": "SharedObjectLoadingScope", + "source_code": "class SharedObjectLoadingScope(object):\n\n def __enter__(self):\n if _shared_object_disabled():\n return NoopLoadingScope()\n global SHARED_OBJECT_LOADING\n SHARED_OBJECT_LOADING.scope = self\n self._obj_ids_to_obj = {}\n return self\n\n def get(self, object_id):\n if object_id is None:\n return\n return self._obj_ids_to_obj.get(object_id)\n\n def set(self, object_id, obj):\n if object_id is None:\n return\n self._obj_ids_to_obj[object_id] = obj\n\n def __exit__(self, *args, **kwargs):\n global SHARED_OBJECT_LOADING\n SHARED_OBJECT_LOADING.scope = NoopLoadingScope()", + "docstring": "A context manager for keeping track of loaded objects. During the deserialization process, we may come across objects that are shared across multiple layers. 
In order to accurately restore the network structure to its original state, allows us to re-use shared objects rather than cloning them.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py", + "ast_data": "ClassDef name:SharedObjectLoadingScope FunctionDef name:__enter__ arg:self arguments arg If Call Return return:yes Call Assign Assign Return return:yes FunctionDef name:get arg:self arg:object_id arguments arg arg If Compare Return return:no Return return:yes Call FunctionDef name:set arg:self arg:object_id arg:obj arguments arg arg arg If Compare Return return:no Assign FunctionDef name:__exit__ arg:self arguments arg arg arg Assign Call" + }, + { + "library": "kornia", + "name": "KORNIA_CHECK_SAME_SHAPE", + "source_code": "def KORNIA_CHECK_SAME_SHAPE(x: Tensor, y: Tensor, raises: bool=True) -> bool:\n if x.shape != y.shape:\n if raises:\n raise TypeError(f'Not same shape for tensors. Got: {x.shape} and {y.shape}')\n return False\n return True", + "docstring": "Check whether two tensor have the same shape. Args: x: first tensor to evaluate. y: sencod tensor to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the two tensors have not the same shape and raises is True. Example: >>> x1 = torch.rand(2, 3, 3) >>> x2 = torch.rand(2, 3, 3) >>> KORNIA_CHECK_SAME_SHAPE(x1, x2) True", + "type": "function", + "file_path": "kornia\\kornia\\core\\check.py", + "ast_data": "FunctionDef name:KORNIA_CHECK_SAME_SHAPE arg:x arg:y arg:raises arguments arg arg arg If Compare If Raise Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_config", + "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n _check_config_keys(config, cls._fields)\n kwargs = _standardize_and_copy_config(config)\n kwargs['normalizer_fn'] = serialization._deserialize_keras_object(config['normalizer_fn'], custom_objects=custom_objects)\n kwargs['dtype'] = dtypes.as_dtype(config['dtype'])\n return cls(**kwargs)", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "print_op_coverage_summary", + "source_code": "def print_op_coverage_summary(model: nn.Module, args, kwargs, *, output_csv=False):\n import csv\n from tabulate import tabulate\n fwd_graph, bwd_graph = get_inductor_decomp_graphs(model, args, kwargs)\n op_counts = {}\n for node in fwd_graph.graph.nodes:\n if node.op == 'call_function' and isinstance(node.target, torch._ops.OpOverload):\n if node.target not in op_counts:\n op_counts[node.target] = 0\n op_counts[node.target] += 1\n for node in bwd_graph.graph.nodes:\n if node.op == 'call_function' and isinstance(node.target, torch._ops.OpOverload):\n if node.target not in op_counts:\n op_counts[node.target] = 0\n op_counts[node.target] += 1\n op_infos = []\n for op, count in op_counts.items():\n supported = op in DTensor._op_dispatcher.sharding_propagator.op_to_rules\n op_infos.append([op, str(op._schema), count, supported])\n count_idx = 2\n op_infos.sort(key=itemgetter(count_idx), reverse=True)\n headers = ['Operator', 'Schema', 'Total Count', 
'Supported']\n print(tabulate(op_infos, headers=headers))\n if output_csv:\n with open('op_summary.csv', 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(headers)\n for row in op_infos:\n csv_writer.writerow(row)", + "docstring": "Util to print the operator coverage summary of a certain model with tabulute. Must have tabulate module installed.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_op_coverage.py", + "ast_data": "FunctionDef name:print_op_coverage_summary arg:model arg:args arg:kwargs arguments arg arg arg arg Assign Call Assign For If BoolOp Compare Call If Compare Assign For If BoolOp Compare Call If Compare Assign Assign For Call Assign Compare Call Call Assign Call Call Assign Call Call If With Call Assign Call Call For Call" + }, + { + "library": "pandas", + "name": "generate_numba_transform_func", + "source_code": "@functools.cache\ndef generate_numba_transform_func(func: Callable[..., np.ndarray], nopython: bool, nogil: bool, parallel: bool) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency('numba')\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def group_transform(values: np.ndarray, index: np.ndarray, begin: np.ndarray, end: np.ndarray, num_columns: int, *args: Any) -> np.ndarray:\n assert len(begin) == len(end)\n num_groups = len(begin)\n result = np.empty((len(values), num_columns))\n for i in numba.prange(num_groups):\n group_index = index[begin[i]:end[i]]\n for j in numba.prange(num_columns):\n group = values[begin[i]:end[i], j]\n result[begin[i]:end[i], j] = numba_func(group, group_index, *args)\n return result\n return group_transform", + "docstring": "Generate a numba jitted transform function specified by values from engine_kwargs. 1. jit the user's function 2. Return a groupby transform function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the groupby evaluation loop. 
Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function", + "type": "function", + "file_path": "pandas\\pandas\\core\\groupby\\numba_.py", + "ast_data": "FunctionDef name:generate_numba_transform_func arg:func arg:nopython arg:nogil arg:parallel arguments arg arg arg arg Assign Call If Assign Call FunctionDef name:group_transform arg:values arg:index arg:begin arg:end arg:num_columns arguments arg arg arg arg arg arg Compare Call Call Assign Call Assign Call Call For Call Assign For Call Assign Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "insert", + "source_code": "def insert(self, func):\n token = self._next_unique_token()\n self._funcs[token] = func\n return token", + "docstring": "Registers and returns a unique token for this entry.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py", + "ast_data": "FunctionDef name:insert arg:self arg:func arguments arg arg Assign Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "tol", + "source_code": "@property\ndef tol(self):\n return self._tol", + "docstring": "positive float: The desired relative tolerance of calculations. Left unspecified, calculations may be faster; when provided, calculations may be more likely to meet the desired accuracy.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py", + "ast_data": "FunctionDef name:tol arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "fields", + "source_code": "@cached_property\ndef fields(self):\n\n def is_not_an_m2m_field(f):\n return not (f.is_relation and f.many_to_many)\n\n def is_not_a_generic_relation(f):\n return not (f.is_relation and f.one_to_many)\n\n def is_not_a_generic_foreign_key(f):\n return not (f.is_relation and f.many_to_one and (not (hasattr(f.remote_field, 'model') and f.remote_field.model)))\n return make_immutable_fields_list('fields', (f for f in self._get_fields(reverse=False) if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f)))", + "docstring": "Return a list of all forward fields on the model and its parents, excluding ManyToManyFields. 
Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list.", + "type": "method", + "file_path": "django\\django\\db\\models\\options.py", + "ast_data": "FunctionDef name:fields arg:self arguments arg FunctionDef name:is_not_an_m2m_field arg:f arguments arg Return return:yes BoolOp FunctionDef name:is_not_a_generic_relation arg:f arguments arg Return return:yes BoolOp FunctionDef name:is_not_a_generic_foreign_key arg:f arguments arg Return return:yes BoolOp BoolOp Call Return return:yes Call Call BoolOp Call Call Call" + }, + { + "library": "tensorflow", + "name": "write_object_proto_for_resource_variable", + "source_code": "def write_object_proto_for_resource_variable(resource_variable, proto, options, enforce_naming=True):\n proto.variable.SetInParent()\n if enforce_naming and (not resource_variable.name.endswith(':0')):\n raise ValueError(f\"Cowardly refusing to save variable {resource_variable.name} because of unexpected suffix in the name (expected ':0')which won't be restored.\")\n proto.variable.name = tensor_module.get_op_name(resource_variable.name)\n proto.variable.trainable = resource_variable.trainable\n proto.variable.dtype = resource_variable.dtype.as_datatype_enum\n proto.variable.synchronization = resource_variable.synchronization.value\n proto.variable.aggregation = resource_variable.aggregation.value\n proto.variable.shape.CopyFrom(resource_variable.shape.as_proto())\n if options.experimental_variable_policy._save_variable_devices():\n if hasattr(resource_variable, 'device'):\n proto.variable.device = resource_variable.device", + "docstring": "Writes additional information of the variable into the SavedObject proto. This allows users to define a to provide extra information of the variable to the SavedObject. For example, DistributedVariable class would fill in components in the distributed context. Args: resource_variable: A or that has the information to be saved into the proto. proto: proto to update. options: A instance that configures save behavior. enforce_naming: A bool determining whether to check that names end in the expected string ':0'", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:write_object_proto_for_resource_variable arg:resource_variable arg:proto arg:options arg:enforce_naming arguments arg arg arg arg Call If BoolOp Call Raise Call Assign Call Assign Assign Assign Assign Call Call If Call If Call Assign" + }, + { + "library": "django", + "name": "base36_to_int", + "source_code": "def base36_to_int(s):\n if len(s) > 13:\n raise ValueError('Base36 input too large')\n return int(s, 36)", + "docstring": "Convert a base 36 string to an int. 
Raise ValueError if the input won't fit into an int.", + "type": "function", + "file_path": "django\\django\\utils\\http.py", + "ast_data": "FunctionDef name:base36_to_int arg:s arguments arg If Compare Call Raise Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_initial_nodes", + "source_code": "def _initial_nodes(n):\n fit = 0.49082003 * n - 4.37859653\n turnover = around(fit).astype(int)\n ia = arange(1, int(floor(n * 0.5) + 1))\n ib = ia[::-1]\n xasq = _initial_nodes_a(n, ia[:turnover + 1])\n xbsq = _initial_nodes_b(n, ib[turnover + 1:])\n iv = sqrt(hstack([xasq, xbsq]))\n if n % 2 == 1:\n iv = hstack([0.0, iv])\n return iv", + "docstring": "Initial guesses for the Hermite roots Computes an initial approximation to the non-negative roots :math: of the Hermite polynomial :math: of order :math:. The Tricomi and Gatteschi initial guesses are used in the region where they are accurate. Parameters ---------- n : int Quadrature order Returns ------- xk : ndarray Approximate roots See Also -------- roots_hermite_asy", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:_initial_nodes arg:n arguments arg Assign Assign Call Call Assign Call Call Call Assign Assign Call Assign Call Assign Call Call If Compare Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "mT", + "source_code": "@property\ndef mT(self):\n if self.ndim < 2:\n raise ValueError('matrix transpose with ndim < 2 is undefined')\n if self._mask is nomask:\n return masked_array(data=self._data.mT)\n else:\n return masked_array(data=self.data.mT, mask=self.mask.mT)", + "docstring": "Return the matrix-transpose of the masked array. The matrix transpose is the transpose of the last two dimensions, even if the array is of higher dimension. .. versionadded:: 2.0 Returns ------- result: MaskedArray The masked array with the last two dimensions transposed Raises ------ ValueError If the array is of dimension less than 2. 
See Also -------- ndarray.mT: Equivalent method for arrays", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:mT arg:self arguments arg If Compare Raise Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_verbosity", + "source_code": "@tf_export(v1=['logging.set_verbosity'])\ndef set_verbosity(v):\n get_logger().setLevel(v)", + "docstring": "Sets the threshold for what messages will be logged.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py", + "ast_data": "FunctionDef name:set_verbosity arg:v arguments arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "Process", + "source_code": "class Process(object):\n\n def __init__(self, *args, **kwargs):\n del args, kwargs\n raise unittest.SkipTest('TODO(b/150264776): Windows is not supported in MultiProcessRunner.')", + "docstring": "A process that skips test (until windows is supported).", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_lib.py", + "ast_data": "ClassDef name:Process FunctionDef name:__init__ arg:self arguments arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "tracing_enabled", + "source_code": "def tracing_enabled():\n return _thread_local_data.enable_call_tracing", + "docstring": "Whether to add extra traces to the queue.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", + "ast_data": "FunctionDef name:tracing_enabled arguments Return return:yes" + }, + { + "library": "numpy", + "name": "shrink_mask", + "source_code": "def shrink_mask(self):\n self._mask = _shrink_mask(self._mask)\n return self", + "docstring": "Reduce a mask to nomask when possible. Parameters ---------- None Returns ------- result : MaskedArray A :class: object. 
Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], [False, False]]) >>> x.shrink_mask() masked_array( data=[[1, 2], [3, 4]], mask=False, fill_value=999999) >>> x.mask False", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:shrink_mask arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_writePng", + "source_code": "def _writePng(self, img):\n buffer = BytesIO()\n img.save(buffer, format='png')\n buffer.seek(8)\n png_data = b''\n bit_depth = palette = None\n while True:\n length, type = struct.unpack(b'!L4s', buffer.read(8))\n if type in [b'IHDR', b'PLTE', b'IDAT']:\n data = buffer.read(length)\n if len(data) != length:\n raise RuntimeError('truncated data')\n if type == b'IHDR':\n bit_depth = int(data[8])\n elif type == b'PLTE':\n palette = data\n elif type == b'IDAT':\n png_data += data\n elif type == b'IEND':\n break\n else:\n buffer.seek(length, 1)\n buffer.seek(4, 1)\n return (png_data, bit_depth, palette)", + "docstring": "Write the image *img* into the pdf file using png predictors with Flate compression.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:_writePng arg:self arg:img arguments arg arg Assign Call Call Call Assign Assign While Assign Call Call If Compare Assign Call If Compare Call Raise Call If Compare Assign Call If Compare Assign If Compare If Compare Call Call Return return:yes" + }, + { + "library": "django", + "name": "Feed", + "source_code": "class Feed(BaseFeed):\n feed_type = GeoRSSFeed\n\n def feed_extra_kwargs(self, obj):\n return {'geometry': self._get_dynamic_attr('geometry', obj)}\n\n def item_extra_kwargs(self, item):\n return {'geometry': self._get_dynamic_attr('item_geometry', item)}", + "docstring": "This is a subclass of the from . This allows users to define a and/or methods on their own subclasses so that geo-referenced information may placed in the feed.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\feeds.py", + "ast_data": "ClassDef name:Feed Assign FunctionDef name:feed_extra_kwargs arg:self arg:obj arguments arg arg Return return:yes Call FunctionDef name:item_extra_kwargs arg:self arg:item arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "is_tf_type", + "source_code": "@tf_export('is_tensor')\ndef is_tf_type(x):\n return isinstance(x, tf_type_classes)", + "docstring": "Checks whether is a TF-native type that can be passed to many TF ops. Use to differentiate types that can ingested by TensorFlow ops without any conversion (e.g., , , and ) from types that need to be converted into tensors before they are ingested (e.g., numpy and Python scalars). For example, in the following code block: we check to make sure that is a tensor (and convert it if not) before accessing its and . (But note that not all TensorFlow native types have shapes or dtypes; is an example of a TensorFlow native type that has neither shape nor dtype.) Args: x: A python object to check. 
Returns: if is a TensorFlow-native type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_util.py", + "ast_data": "FunctionDef name:is_tf_type arg:x arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "parameterized_truncated_normal", + "source_code": "def parameterized_truncated_normal(shape, means=0.0, stddevs=1.0, minvals=-2.0, maxvals=2.0, dtype=dtypes.float32, seed=None, name=None):\n with ops.name_scope(name, 'parameterized_truncated_normal', [shape, means, stddevs, minvals, maxvals]) as name:\n shape_tensor = shape_util.shape_tensor(shape)\n means_tensor = ops.convert_to_tensor(means, dtype=dtype, name='means')\n stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name='stddevs')\n minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name='minvals')\n maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name='maxvals')\n seed1, seed2 = random_seed.get_seed(seed)\n rnd = gen_random_ops.parameterized_truncated_normal(shape_tensor, means_tensor, stddevs_tensor, minvals_tensor, maxvals_tensor, seed=seed1, seed2=seed2)\n shape_util.maybe_set_static_shape(rnd, shape)\n return rnd", + "docstring": "Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. means: A 0-D Tensor or Python value of type . The mean of the truncated normal distribution. stddevs: A 0-D Tensor or Python value of type . The standard deviation of the truncated normal distribution. minvals: A 0-D Tensor or Python value of type . The minimum value of the truncated normal distribution. maxvals: A 0-D Tensor or Python value of type . The maximum value of the truncated normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py", + "ast_data": "FunctionDef name:parameterized_truncated_normal arg:shape arg:means arg:stddevs arg:minvals arg:maxvals arg:dtype arg:seed arg:name arguments arg arg arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_prefetch_handle", + "source_code": "@no_type_check\ndef _prefetch_handle(state: _FSDPState, current_handle: Optional[FlatParamHandle], prefetch_mode: _PrefetchMode) -> None:\n if not current_handle:\n return\n handle = _get_handle_to_prefetch(state, current_handle)\n if not handle:\n return\n prev_training_state = handle._training_state\n if prefetch_mode == _PrefetchMode.BACKWARD:\n handle._training_state = HandleTrainingState.BACKWARD_PRE\n elif prefetch_mode == _PrefetchMode.FORWARD:\n handle._training_state = HandleTrainingState.FORWARD\n else:\n raise ValueError(f'Invalid prefetch mode on rank {state.rank}: {prefetch_mode}')\n _unshard(state, handle, state._unshard_stream, state._pre_unshard_stream)\n handle._training_state = prev_training_state\n handle._prefetched = True", + "docstring": "Prefetches the next handles if needed (without synchronization). 
An empty handles key cannot prefetch.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_prefetch_handle arg:state arg:current_handle arg:prefetch_mode arguments arg arg arg If Return return:no Assign Call If Return return:no Assign If Compare Assign If Compare Assign Raise Call Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "assert_splits_match", + "source_code": "def assert_splits_match(nested_splits_lists):\n error_msg = 'Inputs must have identical ragged splits'\n for splits_list in nested_splits_lists:\n if len(splits_list) != len(nested_splits_lists[0]):\n raise ValueError(error_msg)\n return [check_ops.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for s1, s2 in zip(nested_splits_lists[0], splits_list)]", + "docstring": "Checks that the given splits lists are identical. Performs static tests to ensure that the given splits lists are identical, and returns a list of control dependency op tensors that check that they are fully identical. Args: nested_splits_lists: A list of nested_splits_lists, where each split_list is a list of tensors from a , ordered from outermost ragged dimension to innermost ragged dimension. Returns: A list of control dependency op tensors. Raises: ValueError: If the splits are not identical.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_util.py", + "ast_data": "FunctionDef name:assert_splits_match arg:nested_splits_lists arguments arg Assign For If Compare Call Call Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_in_multi_worker_mode", + "source_code": "def _in_multi_worker_mode(self):\n return self._num_workers > 1", + "docstring": "Whether this strategy indicates working in multi-worker settings.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py", + "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "_get_sharding_prop_cache_info", + "source_code": "def _get_sharding_prop_cache_info():\n from torch.distributed.tensor._api import DTensor\n return DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding.cache_info()", + "docstring": "Get the cache info for the sharding propagation cache, used for debugging purpose only. 
This would return a named tuple showing hits, misses, maxsize and cursize of the sharding propagator cache.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\__init__.py", + "ast_data": "FunctionDef name:_get_sharding_prop_cache_info arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "PrependParamsBuffersConstantAotAutogradInputStep", + "source_code": "class PrependParamsBuffersConstantAotAutogradInputStep(InputAdaptStep):\n\n def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n ordered_params = tuple((model.state_dict[name] for name in model.graph_signature.parameters))\n non_persistent_buffers = set(model.graph_signature.non_persistent_buffers)\n ordered_buffers = []\n for name in model.graph_signature.buffers:\n if name in non_persistent_buffers:\n ordered_buffers.append(model.constants[name])\n else:\n ordered_buffers.append(model.state_dict[name])\n ordered_constant_tensors = tuple((model.constants[fqn] for fqn in model.graph_signature.lifted_tensor_constants))\n updated_args = (*ordered_params, *ordered_buffers, *ordered_constant_tensors, *model_args)\n if model_kwargs:\n return MergeKwargsIntoArgsInputStep().apply(updated_args, model_kwargs, model=model)\n return (updated_args, {})", + "docstring": "Prepend model parameters, buffers and constants to the user input. :func: lifts model parameters, buffers and constants as model input, thus, they must be added to the user input before the model is executed. Args: model: The PyTorch model with embedded parameters and buffers.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", + "ast_data": "ClassDef name:PrependParamsBuffersConstantAotAutogradInputStep FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Assign Call Assign Call Assign For If Compare Call Call Assign Call Assign If Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_tensor_description_to_json", + "source_code": "@classmethod\ndef _tensor_description_to_json(cls, tensor_desc):\n if tensor_desc is None:\n return None\n return {'element': cls._enum_to_json(tensor_desc.element), 'layout': cls._enum_to_json(tensor_desc.layout), 'alignment': tensor_desc.alignment, 'complex_transform': cls._enum_to_json(tensor_desc.complex_transform)}", + "docstring": "Convert TensorDescription to JSON dict. 
Args: tensor_desc: TensorDescription object Returns: dict: Dictionary representation", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py", + "ast_data": "FunctionDef name:_tensor_description_to_json arg:cls arg:tensor_desc arguments arg arg If Compare Return return:no Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "__call__", + "source_code": "def __call__(self, x):\n with np.errstate(invalid='ignore'):\n return umath.less_equal(x, self.critical_value)", + "docstring": "Executes the call behavior.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "device", + "source_code": "def device(*array_list, remove_none=True, remove_types=(str,)):\n array_list = _remove_non_arrays(*array_list, remove_none=remove_none, remove_types=remove_types)\n if not array_list:\n return None\n device_ = _single_array_device(array_list[0])\n for array in array_list[1:]:\n device_other = _single_array_device(array)\n if device_ != device_other:\n raise ValueError(f'Input arrays use different devices: {device_}, {device_other}')\n return device_", + "docstring": "Hardware device where the array data resides on. If the hardware device is not the same for all arrays, an error is raised. Parameters ---------- *array_list : arrays List of array instances from NumPy or an array API compatible library. remove_none : bool, default=True Whether to ignore None objects passed in array_list. remove_types : tuple or list, default=(str,) Types to ignore in array_list. Returns ------- out : device object (see the \"Device Support\" section of the array API spec).", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", + "ast_data": "FunctionDef name:device arguments arg arg arg Assign Call If Return return:no Assign Call For Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "assert_almost_equal", + "source_code": "def assert_almost_equal(actual, desired, decimal=7, *args, **kwds):\n rtol, atol = (0, 1.5 * 10 ** (-decimal))\n return xp_assert_close(actual, desired, *args, atol=atol, rtol=rtol, check_dtype=False, check_shape=False, **kwds)", + "docstring": "Backwards compatible replacement. 
In new code, use xp_assert_close instead.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_array_api.py", + "ast_data": "FunctionDef name:assert_almost_equal arg:actual arg:desired arg:decimal arguments arg arg arg arg arg Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "embedding_inference_rule", + "source_code": "@register_inference_rule(torch.nn.modules.sparse.Embedding)\ndef embedding_inference_rule(n: Node, module_instance, symbols, constraints, counter):\n assert isinstance(n.args[0], Node)\n return gen_embedding_rules(n, symbols, module_instance.embedding_dim, counter)", + "docstring": "The output shape differs from the input shape in the last dimension", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py", + "ast_data": "FunctionDef name:embedding_inference_rule arg:n arg:module_instance arg:symbols arg:constraints arg:counter arguments arg arg arg arg arg Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "ones", + "source_code": "def ones(*size, dtype: Optional[torch.dtype]=None, layout: torch.layout=torch.strided, requires_grad: bool=False, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None) -> DTensor:\n torch_size = normalize_to_torch_size(size)\n return _dtensor_init_helper(torch.ones, torch_size, dtype=dtype, layout=layout, requires_grad=requires_grad, device_mesh=device_mesh, placements=placements)", + "docstring": "Returns a :class: filled with the scalar value 1, with the shape defined by the variable argument `DTensortorch.dtypeDTensortorch.set_default_dtypetorch.layoutDTensorDeviceMeshPlacementDTensor` object on each rank", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py", + "ast_data": "FunctionDef name:ones arguments arg arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_schema", + "source_code": "def get_schema(frame, name: str, keys=None, con=None, dtype: DtypeArg | None=None, schema: str | None=None) -> str:\n with pandasSQL_builder(con=con) as pandas_sql:\n return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype, schema=schema)", + "docstring": "Get the SQL db table schema for the given frame. Parameters ---------- frame : DataFrame name : str name of SQL table keys : string or sequence, default: None columns to use a primary key con: ADBC Connection, SQLAlchemy connectable, sqlite3 connection, default: None ADBC provides high performance I/O with native type support, where available. Using SQLAlchemy makes it possible to use any DB supported by that library If a DBAPI2 object, only sqlite3 is supported. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection. 
schema: str, default: None Optional specifying the schema to be used in creating the table.", + "type": "function", + "file_path": "pandas\\pandas\\io\\sql.py", + "ast_data": "FunctionDef name:get_schema arg:frame arg:name arg:keys arg:con arg:dtype arg:schema arguments arg arg arg arg arg arg With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_restore_slot_variable", + "source_code": "def _restore_slot_variable(self, slot_name, variable, slot_variable):\n variable_key = _var_key(variable)\n deferred_restorations = self._deferred_slot_restorations.get(slot_name, {}).pop(variable_key, [])\n deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True)\n for checkpoint_position in deferred_restorations:\n checkpoint_position.restore(slot_variable)", + "docstring": "Restore a newly created slot variable's value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:_restore_slot_variable arg:self arg:slot_name arg:variable arg:slot_variable arguments arg arg arg arg Assign Call Assign Call Call Call arguments arg For Call" + }, + { + "library": "pandas", + "name": "_isnan", + "source_code": "@property\ndef _isnan(self) -> npt.NDArray[np.bool_]:\n return self.asi8 == iNaT", + "docstring": "return if each value is nan", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_isnan arg:self arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "save", + "source_code": "def save(self, new_export_dir=None):\n is_input_text_proto = file_io.file_exists(file_io.join(compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)))\n if not new_export_dir:\n new_export_dir = self._export_dir\n if is_input_text_proto:\n path = file_io.join(compat.as_bytes(new_export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))\n file_io.write_string_to_file(path, str(self._saved_model))\n else:\n path = file_io.join(compat.as_bytes(new_export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))\n file_io.write_string_to_file(path, self._saved_model.SerializeToString(deterministic=True))\n tf_logging.info('SavedModel written to: %s', compat.as_text(path))", + "docstring": "Saves the updated . Args: new_export_dir: Path where the updated will be saved. If None, the input will be overriden with the updates. 
Raises: errors.OpError: If there are errors during the file save operation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\method_name_updater.py", + "ast_data": "FunctionDef name:save arg:self arg:new_export_dir arguments arg arg Assign Call Call Call Call If Assign If Assign Call Call Call Call Call Assign Call Call Call Call Call Call Call" + }, + { + "library": "scipy", + "name": "getrow", + "source_code": "def getrow(self, i):\n M, N = self.shape\n if i < 0:\n i += M\n if i < 0 or i >= M:\n raise IndexError('row index out of bounds')\n new = self._lil_container((1, N), dtype=self.dtype)\n new.rows[0] = self.rows[i][:]\n new.data[0] = self.data[i][:]\n return new", + "docstring": "Returns a copy of the 'i'th row.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_lil.py", + "ast_data": "FunctionDef name:getrow arg:self arg:i arguments arg arg Assign If Compare If BoolOp Compare Compare Raise Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "wrap", + "source_code": "def wrap(tensor, is_stacked=True, is_sparse_stacked=False):\n assert isinstance(is_stacked, bool)\n assert isinstance(is_sparse_stacked, bool)\n assert isinstance(tensor, tensor_lib.Tensor), type(tensor)\n assert not is_sparse_stacked or is_stacked, 'If the wrapped tensor is stacked via a sparse conversion, it must also be stacked.'\n return WrappedTensor(tensor, is_stacked, is_sparse_stacked)", + "docstring": "Helper to create a WrappedTensor object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:wrap arg:tensor arg:is_stacked arg:is_sparse_stacked arguments arg arg arg Call Call Call Call BoolOp Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_fuser_method_in_reversed_nested_tuple_format", + "source_code": "def _get_fuser_method_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Callable:\n assert config.fuser_method is not None\n if config._pattern_complex_format is not None:\n return config.fuser_method\n if not isinstance(config.pattern, tuple):\n raise ValueError('Expected pattern to be a tuple, got: ', config.pattern)\n if len(config.pattern) == 2:\n return _reverse2(config.fuser_method)\n elif len(config.pattern) == 3:\n return _reverse3(config.fuser_method)\n else:\n raise ValueError('Expected a tuple with 2 or 3 elements, got: ', config.pattern)", + "docstring": "Return the fuser method specified in the given config in the reversed nested tuple format used internally in the quantization pattern matching code. If pattern is specified in the reversed nested tuple format, we assume the fuser method is also specified in this format and simply return it as is. Otherwise, we convert the fuser method as follows: * Given f(is_qat, conv, relu), return f'(is_qat, relu, conv) * Given f(is_qat, conv, bn, relu), return f'(is_qat, relu, bn_conv), where bn_conv is a 2-tuple (bn, conv) The first argument of a fuser method is always and is not affected in the conversion. 
We currently only support functions with 3 or 4 arguments.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\utils.py", + "ast_data": "FunctionDef name:_get_fuser_method_in_reversed_nested_tuple_format arg:config arguments arg Compare If Compare Return return:yes If Call Raise Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call Raise Call" + }, + { + "library": "matplotlib", + "name": "string_width_height", + "source_code": "def string_width_height(self, s):\n if not len(s):\n return (0, 0)\n total_width = 0\n namelast = None\n miny = 1000000000.0\n maxy = 0\n for c in s:\n if c == '\\n':\n continue\n wx, name, bbox = self._metrics[ord(c)]\n total_width += wx + self._kern.get((namelast, name), 0)\n l, b, w, h = bbox\n miny = min(miny, b)\n maxy = max(maxy, b + h)\n namelast = name\n return (total_width, maxy - miny)", + "docstring": "Return the string width (including kerning) and string height as a (*w*, *h*) tuple.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", + "ast_data": "FunctionDef name:string_width_height arg:self arg:s arguments arg arg If Call Return return:yes Assign Assign Assign Assign For If Compare Assign Call Call Assign Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_active", + "source_code": "def set_active(self, active):\n self._active = active", + "docstring": "Set whether the widget is active.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:set_active arg:self arg:active arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_pyval_update_fields", + "source_code": "def _pyval_update_fields(pyval, fields, depth):\n if not isinstance(pyval, (dict, list, tuple)):\n raise ValueError('Expected dict or nested list/tuple of dict')\n for key, target in fields.items():\n for _ in range(1, depth):\n target = target[-1]\n target.append(pyval[key] if isinstance(pyval, dict) else [])\n if isinstance(pyval, (list, tuple)):\n for child in pyval:\n _pyval_update_fields(child, fields, depth + 1)", + "docstring": "Append the field values from to . Args: pyval: A python , or nested list/tuple of , whose value(s) should be appended to . fields: A dictionary mapping string keys to field values. Field values extracted from are appended to this dictionary's values. depth: The depth at which should be appended to the field values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py", + "ast_data": "FunctionDef name:_pyval_update_fields arg:pyval arg:fields arg:depth arguments arg arg arg If Call Raise Call For Call For Call Assign Call Call If Call For Call" + }, + { + "library": "pandas", + "name": "asfreq", + "source_code": "@final\ndef asfreq(self, fill_value=None):\n return self._upsample('asfreq', fill_value=fill_value)", + "docstring": "Return the values at the new freq, essentially a reindex. Parameters ---------- fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- DataFrame or Series Values at the specified freq. See Also -------- Series.asfreq: Convert TimeSeries to specified frequency. DataFrame.asfreq: Convert TimeSeries to specified frequency. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... 
[\"2023-01-01\", \"2023-01-31\", \"2023-02-01\", \"2023-02-28\"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-31 2 2023-02-01 3 2023-02-28 4 dtype: int64 >>> ser.resample(\"MS\").asfreq() 2023-01-01 1 2023-02-01 3 Freq: MS, dtype: int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\resample.py", + "ast_data": "FunctionDef name:asfreq arg:self arg:fill_value arguments arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "JSConstructor", + "source_code": "class JSConstructor(JSCallable):\n allow_nesting = True\n\n def get_display_prefix(self) -> list[Node]:\n return [addnodes.desc_sig_keyword('class', 'class'), addnodes.desc_sig_space()]", + "docstring": "Like a callable but with a different prefix.", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\javascript.py", + "ast_data": "ClassDef name:JSConstructor Assign FunctionDef name:get_display_prefix arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "exists", + "source_code": "def exists(self, name):\n raise NotImplementedError('subclasses of Storage must provide an exists() method')", + "docstring": "Return True if a file referenced by the given name already exists in the storage system, or False if the name is available for a new file.", + "type": "method", + "file_path": "django\\django\\core\\files\\storage\\base.py", + "ast_data": "FunctionDef name:exists arg:self arg:name arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "resolve_shape_to_proxy", + "source_code": "def resolve_shape_to_proxy(shape: list[Union[int, torch.SymInt]], bound_symbols: dict[Any, Any]):\n from torch.utils._sympy.interp import sympy_interp\n from torch.utils._sympy.reference import PythonReferenceAnalysis\n ret = []\n for s in shape:\n if isinstance(s, torch.SymInt):\n ret.append(sympy_interp(PythonReferenceAnalysis, bound_symbols, s.node.expr))\n else:\n assert isinstance(s, int)\n ret.append(s)\n return ret", + "docstring": "Given a list of symints/ints, this function returns a calculated expression of bound_symbols' values. When we trace this function, we'll get a graph with call_function nodes that describes how the shape expr is computed from bound_symbols' values. 
Suppose shape = (s1*s2, s1+s2) and bound_symbols = {s1: arg0, s2: arg1}, the result will be (arg0 * arg1, arg0 + arg1).", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py", + "ast_data": "FunctionDef name:resolve_shape_to_proxy arg:shape arg:bound_symbols arguments arg arg Assign For If Call Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "right_multiplied_operator", + "source_code": "def right_multiplied_operator(J, d):\n J = aslinearoperator(J)\n\n def matvec(x):\n return J.matvec(np.ravel(x) * d)\n\n def matmat(X):\n return J.matmat(X * d[:, np.newaxis])\n\n def rmatvec(x):\n return d * J.rmatvec(x)\n return LinearOperator(J.shape, matvec=matvec, matmat=matmat, rmatvec=rmatvec)", + "docstring": "Return J diag(d) as LinearOperator.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:right_multiplied_operator arg:J arg:d arguments arg arg Assign Call FunctionDef name:matvec arg:x arguments arg Return return:yes Call Call FunctionDef name:matmat arg:X arguments arg Return return:yes Call FunctionDef name:rmatvec arg:x arguments arg Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "gml", + "source_code": "@property\ndef gml(self):\n return capi.to_gml(self.ptr)", + "docstring": "Return the GML representation of the Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:gml arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "tail", + "source_code": "@final\n@Substitution(name='groupby')\n@Substitution(see_also=_common_see_also)\ndef tail(self, n: int=5) -> NDFrameT:\n if n:\n mask = self._make_mask_from_positional_indexer(slice(-n, None))\n else:\n mask = self._make_mask_from_positional_indexer([])\n return self._mask_selected_obj(mask)", + "docstring": "Return last n rows of each group. Similar to `` flag is ignored). Parameters ---------- n : int If positive: number of entries to include from end of each group. If negative: number of entries to exclude from start of each group. Returns ------- Series or DataFrame Subset of original Series or DataFrame as determined by n. %(see_also)s Examples -------- >>> df = pd.DataFrame( ... [[\"a\", 1], [\"a\", 2], [\"b\", 1], [\"b\", 2]], columns=[\"A\", \"B\"] ... 
) >>> df.groupby(\"A\").tail(1) A B 1 a 2 3 b 2 >>> df.groupby(\"A\").tail(-1) A B 1 a 2 3 b 2", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:tail arg:self arg:n arguments arg arg If Assign Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "set_language", + "source_code": "def set_language(request):\n next_url = request.POST.get('next', request.GET.get('next'))\n if (next_url or request.accepts('text/html')) and (not url_has_allowed_host_and_scheme(url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure())):\n next_url = request.META.get('HTTP_REFERER')\n if not url_has_allowed_host_and_scheme(url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure()):\n next_url = '/'\n response = HttpResponseRedirect(next_url) if next_url else HttpResponse(status=204)\n if request.method == 'POST':\n lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)\n if lang_code and check_for_language(lang_code):\n if next_url:\n next_trans = translate_url(next_url, lang_code)\n if next_trans != next_url:\n response = HttpResponseRedirect(next_trans)\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code, max_age=settings.LANGUAGE_COOKIE_AGE, path=settings.LANGUAGE_COOKIE_PATH, domain=settings.LANGUAGE_COOKIE_DOMAIN, secure=settings.LANGUAGE_COOKIE_SECURE, httponly=settings.LANGUAGE_COOKIE_HTTPONLY, samesite=settings.LANGUAGE_COOKIE_SAMESITE)\n return response", + "docstring": "Redirect to a given URL while setting the chosen language in the session (if enabled) and in a cookie. The URL and the language code need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state.", + "type": "function", + "file_path": "django\\django\\views\\i18n.py", + "ast_data": "FunctionDef name:set_language arg:request arguments arg Assign Call Call If BoolOp BoolOp Call Call Call Call Assign Call If Call Call Call Assign Assign Call Call If Compare Assign Call If BoolOp Call If Assign Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "mp_wright_bessel", + "source_code": "def mp_wright_bessel(a, b, x, dps=50, maxterms=2000):\n with mp.workdps(dps):\n a, b, x = (mp.mpf(a), mp.mpf(b), mp.mpf(x))\n res = mp.nsum(lambda k: x ** k / mp.fac(k) * rgamma_cached(a * k + b, dps=dps), [0, mp.inf], tol=dps, method='s', steps=[maxterms])\n return mpf2float(res)", + "docstring": "Compute Wright's generalized Bessel function as Series with mpmath.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel_data.py", + "ast_data": "FunctionDef name:mp_wright_bessel arg:a arg:b arg:x arg:dps arg:maxterms arguments arg arg arg arg arg With Call Assign Call Call Call Assign Call arguments arg Call Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "start", + "source_code": "def start(self):\n if self.finalized:\n self.bus.log('Already deamonized.')\n if threading.active_count() != 1:\n self.bus.log('There are %r active threads. Daemonizing now may cause strange failures.' 
% threading.enumerate(), level=30)\n self.daemonize(self.stdin, self.stdout, self.stderr, self.bus.log)\n self.finalized = True", + "docstring": "Attempt to daemonize the process.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\plugins.py", + "ast_data": "FunctionDef name:start arg:self arguments arg If Call If Compare Call Call Call Call Assign" + }, + { + "library": "pytorch", + "name": "update", + "source_code": "def update(self):\n if self.ivars['istep'] == 0:\n X_norm = float(torch.norm(self.X))\n iX_norm = X_norm ** (-1)\n A_norm = float(torch.norm(_utils.matmul(self.A, self.X))) * iX_norm\n B_norm = float(torch.norm(_utils.matmul(self.B, self.X))) * iX_norm\n self.fvars['X_norm'] = X_norm\n self.fvars['A_norm'] = A_norm\n self.fvars['B_norm'] = B_norm\n self.ivars['iterations_left'] = self.iparams['niter']\n self.ivars['converged_count'] = 0\n self.ivars['converged_end'] = 0\n if self.method == 'ortho':\n self._update_ortho()\n else:\n self._update_basic()\n self.ivars['iterations_left'] = self.ivars['iterations_left'] - 1\n self.ivars['istep'] = self.ivars['istep'] + 1", + "docstring": "Set and update iteration variables.", + "type": "method", + "file_path": "pytorch\\torch\\_lobpcg.py", + "ast_data": "FunctionDef name:update arg:self arguments arg If Compare Assign Call Call Assign Assign Call Call Call Assign Call Call Call Assign Assign Assign Assign Assign Assign If Compare Call Call Assign Assign" + }, + { + "library": "numpy", + "name": "hermvander3d", + "source_code": "def hermvander3d(x, y, z, deg):\n return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg)", + "docstring": "Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees and sample points `lmnxyz`0 >> from numpy.polynomial.hermite import hermvander3d >>> x = np.array([-1, 0, 1]) >>> y = np.array([-1, 0, 1]) >>> z = np.array([-1, 0, 1]) >>> hermvander3d(x, y, z, [0, 1, 2]) array([[ 1., -2., 2., -2., 4., -4.], [ 1., 0., -2., 0., 0., -0.], [ 1., 2., 2., 2., 4., 4.]])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite.py", + "ast_data": "FunctionDef name:hermvander3d arg:x arg:y arg:z arg:deg arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, shift: int=1) -> None:\n super().__init__()\n self._shift = shift", + "docstring": "Initialize the renderer. 
Args: shift: Size of far-field layer: int", + "type": "method", + "file_path": "kornia\\kornia\\nerf\\volume_renderer.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:shift arguments arg arg Call Call Assign" + }, + { + "library": "kornia", + "name": "get_branges_scales", + "source_code": "def get_branges_scales(x, sample_drop_ratio=0.0):\n b, n, d = x.shape\n sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)\n brange = torch.randperm(b, device=x.device)[:sample_subset_size]\n residual_scale_factor = b / sample_subset_size\n return (brange, residual_scale_factor)", + "docstring": "Add bernoulli sampled range and scale.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\block.py", + "ast_data": "FunctionDef name:get_branges_scales arg:x arg:sample_drop_ratio arguments arg arg Assign Assign Call Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_add_file", + "source_code": "def _add_file(self, filename: str):\n *prefix, last = filename.split('/')\n if len(prefix) > 1 and prefix[0] == '.data':\n return\n package = self._get_or_create_package(prefix)\n if isinstance(package, _ExternNode):\n raise ImportError(f'inconsistent module structure. package contains a module file {filename} that is a subpackage of a module marked external.')\n if last == '__init__.py':\n package.source_file = filename\n elif last.endswith('.py'):\n package_name = last[:-len('.py')]\n package.children[package_name] = _ModuleNode(filename)", + "docstring": "Assembles a Python module out of the given file. Will ignore files in the .data directory. Args: filename (str): the name of the file inside of the package archive to be added", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_importer.py", + "ast_data": "FunctionDef name:_add_file arg:self arg:filename arguments arg arg Assign Call If BoolOp Compare Call Compare Return return:no Assign Call If Call Raise Call If Compare Assign If Call Assign Call Assign Call" + }, + { + "library": "scipy", + "name": "mat_struct", + "source_code": "class mat_struct:\n pass", + "docstring": "Placeholder for holding read data from structs. 
We use instances of this class when the user passes False as a value to the `scipy.io.loadmat` function.", + "type": "class", + "file_path": "scipy\\scipy\\io\\matlab\\_mio5_params.py", + "ast_data": "ClassDef name:mat_struct" + }, + { + "library": "django", + "name": "value_from_datadict", + "source_code": "def value_from_datadict(self, data, files, name):\n return data.get(name)", + "docstring": "Given a dictionary of data and this widget's name, return the value of this widget or None if it's not provided.", + "type": "method", + "file_path": "django\\django\\forms\\widgets.py", + "ast_data": "FunctionDef name:value_from_datadict arg:self arg:data arg:files arg:name arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "Status", + "source_code": "class Status(Enum):\n SKIPPED = 'skipped'\n PASSED = 'passed'\n FAILED_COMPILE = 'failed_compile'\n FAILED_RUN_COMPILE_EXCEPTION = 'failed_run_compile_exception'\n FAILED_RUN_EAGER_EXCEPTION = 'failed_run_eager_exception'\n FAILED_RUN_RETURN = 'failed_run_return'\n\n def failing(self) -> bool:\n return self == Status.FAILED_COMPILE or self == Status.FAILED_RUN_EAGER_EXCEPTION or self == Status.FAILED_RUN_COMPILE_EXCEPTION or (self == Status.FAILED_RUN_RETURN)", + "docstring": "The Status return value enum for Config Fuzzer", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\fuzzer.py", + "ast_data": "ClassDef name:Status Assign Assign Assign Assign Assign Assign FunctionDef name:failing arg:self arguments arg Return return:yes BoolOp Compare Compare Compare Compare" + }, + { + "library": "scikit-learn", + "name": "_make_random_matrix", + "source_code": "def _make_random_matrix(self, n_components, n_features):\n random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(n_components, n_features, random_state=random_state)", + "docstring": "Generate the random projection matrix. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : ndarray of shape (n_components, n_features) The generated random matrix.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\random_projection.py", + "ast_data": "FunctionDef name:_make_random_matrix arg:self arg:n_components arg:n_features arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "is_platform_power", + "source_code": "def is_platform_power() -> bool:\n return platform.machine() in ('ppc64', 'ppc64le')", + "docstring": "Checking if the running platform use Power architecture. 
Returns ------- bool True if the running platform uses ARM architecture.", + "type": "function", + "file_path": "pandas\\pandas\\compat\\__init__.py", + "ast_data": "FunctionDef name:is_platform_power arguments Return return:yes Compare Call" + }, + { + "library": "sphinx", + "name": "_config_status", + "source_code": "@staticmethod\ndef _config_status(*, old_config: Config | None, new_config: Config, verbosity: int) -> tuple[int, str]:\n if old_config is None:\n return (CONFIG_NEW, '')\n if old_config.extensions != new_config.extensions:\n old_extensions = set(old_config.extensions)\n new_extensions = set(new_config.extensions)\n extensions = old_extensions ^ new_extensions\n if len(extensions) == 1:\n extension = extensions.pop()\n else:\n extension = f'{len(extensions)}'\n return (CONFIG_EXTENSIONS_CHANGED, f' ({extension!r})')\n if (changed_keys := _differing_config_keys(old_config, new_config)):\n changed_num = len(changed_keys)\n if changed_num == 1:\n logger.info(__('The configuration has changed (1 option: %r)'), next(iter(changed_keys)))\n elif changed_num <= 5 or verbosity >= 1:\n logger.info(__('The configuration has changed (%d options: %s)'), changed_num, ', '.join(map(repr, sorted(changed_keys))))\n else:\n logger.info(__('The configuration has changed (%d options: %s, ...)'), changed_num, ', '.join(map(repr, sorted(changed_keys)[:5])))\n for item in new_config.filter(frozenset({'env'})):\n if old_config[item.name] != item.value:\n return (CONFIG_CHANGED, f' ({item.name!r})')\n return (CONFIG_OK, '')", + "docstring": "Report the differences between two Config objects. Returns a triple of: 1. The new configuration 2. A status code indicating how the configuration has changed. 3. A status message indicating what has changed.", + "type": "method", + "file_path": "sphinx\\sphinx\\environment\\__init__.py", + "ast_data": "FunctionDef name:_config_status arguments arg arg arg If Compare Return return:yes If Compare Assign Call Assign Call Assign If Compare Call Assign Call Assign Call Return return:yes If Call Assign Call If Compare Call Call Call Call If BoolOp Compare Compare Call Call Call Call Call Call Call Call Call Call For Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "kornia", + "name": "get_perpendicular", + "source_code": "def get_perpendicular(lines: Tensor, points: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(lines, ['*', 'N', '3'])\n KORNIA_CHECK_SHAPE(points, ['*', 'N', 'two'])\n if points.shape[2] == 2:\n points_h: Tensor = convert_points_to_homogeneous(points)\n elif points.shape[2] == 3:\n points_h = points\n else:\n raise AssertionError(points.shape)\n infinity_point = lines * torch.tensor([1, 1, 0], dtype=lines.dtype, device=lines.device).view(1, 1, 3)\n perp: Tensor = points_h.cross(infinity_point, dim=2)\n return perp", + "docstring": "Compute the perpendicular to a line, through the point. Args: lines: tensor containing the set of lines :math:. points: tensor containing the set of points :math:. Returns: a tensor with shape :math: containing a vector of the epipolar perpendicular lines. 
Each line is described as :math: and encoding the vectors as :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py", + "ast_data": "FunctionDef name:get_perpendicular arg:lines arg:points arguments arg arg Call Call If Compare Call If Compare Assign Raise Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "right_inverse", + "source_code": "def right_inverse(self, value: Tensor) -> None:\n with torch.no_grad():\n for module in reversed(self):\n if hasattr(module, 'right_inverse'):\n value = module.right_inverse(value)\n else:\n raise RuntimeError(f'parametrization {type(module).__name__} does not implement right_inverse.')\n if self.is_tensor:\n if not isinstance(value, Tensor):\n raise ValueError(f'`right_inverse` should return a tensor. Got {type(value).__name__}')\n if value.dtype != self.original.dtype:\n raise ValueError(f'The tensor returned by `right_inverse` has dtype {value.dtype} while `original` has dtype {self.original.dtype}')\n _maybe_set(self.original, value)\n else:\n if not isinstance(value, collections.abc.Sequence):\n raise ValueError(f\"'right_inverse' must return a sequence of tensors. Got {type(value).__name__}.\")\n if len(value) != self.ntensors:\n raise ValueError(f\"'right_inverse' must return a sequence of tensors of length {self.ntensors}. Got a sequence of length {len(value)}.\")\n for i, tensor in enumerate(value):\n original_i = getattr(self, f'original{i}')\n if not isinstance(tensor, Tensor):\n raise ValueError(f'`right_inverse` must return a sequence of tensors. Got element {i} of type {type(tensor).__name__}')\n if original_i.dtype != tensor.dtype:\n raise ValueError(f'Tensor {i} returned by `right_inverse` has dtype {tensor.dtype} while `original{i}` has dtype {original_i.dtype}')\n _maybe_set(original_i, tensor)", + "docstring": "Call the ``, ... if it outputs several. Args: value (Tensor): Value to which initialize the module", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py", + "ast_data": "FunctionDef name:right_inverse arg:self arg:value arguments arg arg With Call For Call If Call Assign Call Raise Call Call If If Call Raise Call Call If Compare Raise Call Call If Call Raise Call Call If Compare Call Raise Call Call For Call Assign Call If Call Raise Call Call If Compare Raise Call Call" + }, + { + "library": "scipy", + "name": "rvs", + "source_code": "def rvs(self, *args, **kwds):\n discrete = kwds.pop('discrete', None)\n rndm = kwds.pop('random_state', None)\n args, loc, scale, size = self._parse_args_rvs(*args, **kwds)\n cond = logical_and(self._argcheck(*args), scale >= 0)\n if not np.all(cond):\n message = f'Domain error in arguments. The `scale` parameter must be positive for all distributions, and many distributions have restrictions on shape parameters. Please see the `scipy.stats.{self.name}` documentation for details.'\n raise ValueError(message)\n if np.all(scale == 0):\n return loc * ones(size, 'd')\n if rndm is not None:\n random_state_saved = self._random_state\n random_state = check_random_state(rndm)\n else:\n random_state = self._random_state\n vals = self._rvs(*args, size=size, random_state=random_state)\n vals = vals * scale + loc\n if rndm is not None:\n self._random_state = random_state_saved\n if discrete and (not isinstance(self, rv_sample)):\n if size == ():\n vals = int(vals)\n else:\n vals = vals.astype(np.int64)\n return vals", + "docstring": "Random variates of given type. 
Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). scale : array_like, optional Scale parameter (default=1). size : int or tuple of ints, optional Defining number of random variates (default is 1). random_state : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `random_staterandom_statesize`.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:rvs arg:self arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Call Compare If Call Assign Raise Call If Call Compare Return return:yes Call If Compare Assign Assign Call Assign Assign Call Assign If Compare Assign If BoolOp Call If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_check_broadcast_up_to", + "source_code": "def _check_broadcast_up_to(arr_from, shape_to, name):\n shape_from = arr_from.shape\n if len(shape_to) >= len(shape_from):\n for t, f in zip(shape_to[::-1], shape_from[::-1]):\n if f != 1 and f != t:\n break\n else:\n if arr_from.size != 1 and arr_from.shape != shape_to:\n arr_from = np.ones(shape_to, arr_from.dtype) * arr_from\n return arr_from.ravel()\n raise ValueError(f'{name} argument must be able to broadcast up to shape {shape_to} but had shape {shape_from}')", + "docstring": "Helper to check that arr_from broadcasts up to shape_to", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", + "ast_data": "FunctionDef name:_check_broadcast_up_to arg:arr_from arg:shape_to arg:name arguments arg arg arg Assign If Compare Call Call For Call If BoolOp Compare Compare If BoolOp Compare Compare Assign Call Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "invalid_unique_memory_format", + "source_code": "def invalid_unique_memory_format(tensor, valid_memory_formats):\n n_legality = 0\n for memory_format in valid_memory_formats:\n if tensor.is_contiguous(memory_format=memory_format):\n n_legality += 1\n return n_legality != 1", + "docstring": "Returns True if the tensor cannot be uniquely mapped to any of the given memory formats, False otherwise.", + "type": "method", + "file_path": "pytorch\\torch\\_tensor.py", + "ast_data": "FunctionDef name:invalid_unique_memory_format arg:tensor arg:valid_memory_formats arguments arg arg Assign For If Call Return return:yes Compare" + }, + { + "library": "scipy", + "name": "time_count_neighbors_shallow", + "source_code": "def time_count_neighbors_shallow(self, mn1n2, Nr):\n self.T1s.count_neighbors(self.T2s, self.r)", + "docstring": "Count neighbors for a shallow kd-tree dim | # points T1 | # points T2 | Nr", + "type": "method", + "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py", + "ast_data": "FunctionDef name:time_count_neighbors_shallow arg:self arg:mn1n2 arg:Nr arguments arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "_step_fn", + "source_code": "def _step_fn(ctx, inputs):\n if isinstance(inputs, (tuple, list)) and len(inputs) == 2:\n inputs, targets = inputs\n else:\n targets = None\n if isinstance(inputs, dict):\n inputs = [inputs[input_name] for input_name in model._feed_input_names]\n _build_model(strategy, model, mode, inputs, targets)\n grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args = 
strategy.extended.call_for_each_replica(_per_replica_execution_function, args=(dist_utils.get_distributed_model(model, mode), mode))\n all_inputs, all_outputs, all_updates, all_session_args = dist_utils.unwrap_values(strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args)\n combined_fn = backend.function(all_inputs, all_outputs, updates=all_updates, name='distributed_' + str(mode) + '_function', **all_session_args)\n for label, output in zip(output_labels, combined_fn.outputs):\n if label == 'loss':\n reduce_op = ds_reduce_util.ReduceOp.SUM\n else:\n reduce_op = ds_reduce_util.ReduceOp.MEAN\n ctx.set_last_step_output(label, output, reduce_op)\n return combined_fn.updates_op", + "docstring": "A step fn that returns update ops.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py", + "ast_data": "FunctionDef name:_step_fn arg:ctx arg:inputs arguments arg arg If BoolOp Call Compare Call Assign Assign If Call Assign Call Assign Call Call Assign Call Assign Call Call For Call If Compare Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "loop_pass", + "source_code": "def loop_pass(base_pass: Callable, n_iter: Optional[int]=None, predicate: Optional[Callable]=None):\n assert (n_iter is not None) ^ (predicate is not None), 'Exactly one of `n_iter`or `predicate` must be specified.'\n\n @wraps(base_pass)\n def new_pass(source):\n output = source\n if n_iter is not None and n_iter > 0:\n for _ in range(n_iter):\n output = base_pass(output)\n elif predicate is not None:\n while predicate(output):\n output = base_pass(output)\n else:\n raise RuntimeError(f'loop_pass must be given positive int n_iter (given {n_iter}) xor predicate (given {predicate})')\n return output\n return new_pass", + "docstring": "Convenience wrapper for passes which need to be applied multiple times. Exactly one of or must be specified. Args: base_pass (Callable[Object, Object]): pass to be applied in loop n_iter (int, optional): number of times to loop pass predicate (Callable[Object, bool], optional):", + "type": "function", + "file_path": "pytorch\\torch\\fx\\passes\\pass_manager.py", + "ast_data": "FunctionDef name:loop_pass arg:base_pass arg:n_iter arg:predicate arguments arg arg arg Compare Compare FunctionDef name:new_pass arg:source arguments arg Assign If BoolOp Compare Compare For Call Assign Call If Compare While Call Assign Call Raise Call Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "urlsafe_base64_encode", + "source_code": "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b'\\n=').decode('ascii')", + "docstring": "Encode a bytestring to a base64 string for use in URLs. 
Strip any trailing equal signs.", + "type": "function", + "file_path": "django\\django\\utils\\http.py", + "ast_data": "FunctionDef name:urlsafe_base64_encode arg:s arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "create_onnx_friendly_decomposition_table", + "source_code": "def create_onnx_friendly_decomposition_table(registry) -> dict[torch._ops.OperatorBase, Callable]:\n decomposition_table: dict[torch._ops.OperatorBase, Callable] = {}\n _ONNX_SUPPORT_OP_OVERLOADS = _create_onnx_supports_op_overload_table(registry)\n for op_overload, decomp_fn in torch._decomp.decomposition_table.items():\n if 'torch._refs' in decomp_fn.__module__ or op_overload in _ONNX_SUPPORT_OP_OVERLOADS:\n continue\n decomposition_table[op_overload] = decomp_fn\n for op_overload, decomp_fn in torch._decomp.core_aten_decompositions().items():\n if op_overload in _ONNX_SUPPORT_OP_OVERLOADS:\n continue\n decomposition_table[op_overload] = decomp_fn\n return decomposition_table", + "docstring": "This function creates a dictionary of op overloads and their decomposition functions for ops that do not have ONNX symbolic functions. If an op already has an ONNX symbolic function, its decomposition function is excluded from the table. The decomposition table is a subset of PyTorch's built-in aten-to-aten decomposition. Args: registry: The ONNX registry for PyTorch. Returns: Dict[torch._ops.OperatorBase, Callable]: A dictionary that maps op overloads to their corresponding decomposition functions.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\decomposition_table.py", + "ast_data": "FunctionDef name:create_onnx_friendly_decomposition_table arg:registry arguments arg Assign Call For Call If BoolOp Compare Compare Assign For Call Call If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_dimension_tensor_conversion_function", + "source_code": "def _dimension_tensor_conversion_function(d, dtype=None, name=None, as_ref=False):\n _ = as_ref\n if d.value is None:\n raise ValueError(f'Cannot convert unknown Dimension {d} to a Tensor.')\n if dtype is not None:\n if dtype not in (dtypes.int32, dtypes.int64):\n raise TypeError(f'Cannot convert Dimension {d} to dtype {dtype}. Allowed dtypes are tf.int32 and tf.int64.')\n else:\n dtype = dtypes.int32\n if name is None:\n name = 'shape_as_tensor'\n return constant(d.value, dtype=dtype, name=name)", + "docstring": "Function to convert Dimension to Tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py", + "ast_data": "FunctionDef name:_dimension_tensor_conversion_function arg:d arg:dtype arg:name arg:as_ref arguments arg arg arg arg Assign If Compare Raise Call If Compare If Compare Raise Call Assign If Compare Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_axisbelow", + "source_code": "def get_axisbelow(self):\n return self._axisbelow", + "docstring": "Get whether axis ticks and gridlines are above or below most artists. 
Returns ------- bool or 'line' See Also -------- set_axisbelow", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:get_axisbelow arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "_earliest", + "source_code": "def _earliest(self, *fields):\n if fields:\n order_by = fields\n else:\n order_by = getattr(self.model._meta, 'get_latest_by')\n if order_by and (not isinstance(order_by, (tuple, list))):\n order_by = (order_by,)\n if order_by is None:\n raise ValueError(\"earliest() and latest() require either fields as positional arguments or 'get_latest_by' in the model's Meta.\")\n obj = self._chain()\n obj.query.set_limits(high=1)\n obj.query.clear_ordering(force=True)\n obj.query.add_ordering(*order_by)\n return obj.get()", + "docstring": "Return the earliest object according to fields (if given) or by the model's Meta.get_latest_by.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:_earliest arg:self arguments arg arg If Assign Assign Call If BoolOp Call Assign If Compare Raise Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_maybe_null_out", + "source_code": "def _maybe_null_out(result: np.ndarray | float | NaTType, axis: AxisInt | None, mask: npt.NDArray[np.bool_] | None, shape: tuple[int, ...], min_count: int=1, datetimelike: bool=False) -> np.ndarray | float | NaTType:\n if mask is None and min_count == 0:\n return result\n if axis is not None and isinstance(result, np.ndarray):\n if mask is not None:\n null_mask = mask.shape[axis] - mask.sum(axis) - min_count < 0\n else:\n below_count = shape[axis] - min_count < 0\n new_shape = shape[:axis] + shape[axis + 1:]\n null_mask = np.broadcast_to(below_count, new_shape)\n if np.any(null_mask):\n if datetimelike:\n result[null_mask] = iNaT\n elif is_numeric_dtype(result):\n if np.iscomplexobj(result):\n result = result.astype('c16')\n elif not is_float_dtype(result):\n result = result.astype('f8', copy=False)\n result[null_mask] = np.nan\n else:\n result[null_mask] = None\n elif result is not NaT:\n if check_below_min_count(shape, mask, min_count):\n result_dtype = getattr(result, 'dtype', None)\n if is_float_dtype(result_dtype):\n result = result_dtype.type('nan')\n else:\n result = np.nan\n return result", + "docstring": "Returns ------- Dtype The product of all elements on a given axis. ( NaNs are treated as 1)", + "type": "function", + "file_path": "pandas\\pandas\\core\\nanops.py", + "ast_data": "FunctionDef name:_maybe_null_out arg:result arg:axis arg:mask arg:shape arg:min_count arg:datetimelike arguments arg arg arg arg arg arg If BoolOp Compare Compare Return return:yes If BoolOp Compare Call If Compare Assign Compare Call Assign Compare Assign Assign Call If Call If Assign If Call If Call Assign Call If Call Assign Call Assign Assign If Compare If Call Assign Call If Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "create_global_plan", + "source_code": "@abc.abstractmethod\ndef create_global_plan(self, global_plan: list[LoadPlan]) -> list[LoadPlan]:\n pass", + "docstring": "Compute the global load plan and return plans for each rank. . N.B. 
This is called on the coordinator rank only", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py", + "ast_data": "FunctionDef name:create_global_plan arg:self arg:global_plan arguments arg arg" + }, + { + "library": "scikit-learn", + "name": "__len__", + "source_code": "def __len__(self):\n product = partial(reduce, operator.mul)\n return sum((product((len(v) for v in p.values())) if p else 1 for p in self.param_grid))", + "docstring": "Number of points on the grid.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "true_negatives", + "source_code": "@tf_export(v1=['metrics.true_negatives'])\ndef true_negatives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None):\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.true_negatives is not supported when eager execution is enabled.')\n with variable_scope.variable_scope(name, 'true_negatives', (predictions, labels, weights)):\n predictions, labels, weights = _remove_squeezable_dimensions(predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights)\n is_true_negative = math_ops.logical_and(math_ops.equal(labels, False), math_ops.equal(predictions, False))\n return _count_condition(is_true_negative, weights, metrics_collections, updates_collections)", + "docstring": "Sum the weights of true_negatives. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a whose dimensions must match . Will be cast to . predictions: The predicted values, a of arbitrary dimensions. Will be cast to . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:true_negatives arg:labels arg:predictions arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg If Call Raise Call With Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_update_cross_replica", + "source_code": "def _update_cross_replica(self, update_fn, value, **kwargs):\n values_util.mark_as_unsaveable()\n return self.distribute_strategy.extended.update(self, update_fn, args=(value,), kwargs=kwargs, group=True)", + "docstring": "Applies updates across replicas. Args: update_fn: A callable to pass to to update the variable. It should has the same signature as . value: value to be passed to . **kwargs: remaining arguments to . 
Returns: Updated variable or .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:_update_cross_replica arg:self arg:update_fn arg:value arguments arg arg arg arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "UnexpectedSubprocessExitError", + "source_code": "@tf_export('__internal__.distribute.multi_process_runner.UnexpectedSubprocessExitError', v1=[])\nclass UnexpectedSubprocessExitError(RuntimeError):\n\n def __init__(self, msg, mpr_result):\n super(UnexpectedSubprocessExitError, self).__init__(msg)\n self.mpr_result = mpr_result", + "docstring": "An error indicating there is at least one subprocess with unexpected exit. When this is raised, a namedtuple object representing the multi-process run result can be retrieved by 's attribute. See for more information.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py", + "ast_data": "ClassDef name:UnexpectedSubprocessExitError FunctionDef name:__init__ arg:self arg:msg arg:mpr_result arguments arg arg arg Call Call Assign Call" + }, + { + "library": "pygame", + "name": "make_sound", + "source_code": "def make_sound(array):\n return mixer.Sound(array=array)", + "docstring": "pygame.sndarray.make_sound(array): return Sound Convert an array into a Sound object. Create a new playable Sound object from an array. The mixer module must be initialized and the array format must be similar to the mixer audio format.", + "type": "function", + "file_path": "pygame\\src_py\\sndarray.py", + "ast_data": "FunctionDef name:make_sound arg:array arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_init_from_proto", + "source_code": "def _init_from_proto(self, context_def, import_scope=None):\n assert isinstance(context_def, control_flow_pb2.CondContextDef)\n g = ops.get_default_graph()\n self._name = ops.prepend_name_scope(context_def.context_name, import_scope)\n self._pred = g.as_graph_element(ops.prepend_name_scope(context_def.pred_name, import_scope))\n self._pivot = g.as_graph_element(ops.prepend_name_scope(context_def.pivot_name, import_scope))\n self._branch = context_def.branch\n super(CondContext, self).__init__(values_def=context_def.values_def, import_scope=import_scope)", + "docstring": "Creates a new from protocol buffer. Args: context_def: protocol buffer. import_scope: Optional . 
Name scope to add.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:_init_from_proto arg:self arg:context_def arg:import_scope arguments arg arg arg Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "strategy_supports_loss_scaling", + "source_code": "def strategy_supports_loss_scaling():\n if not distribute_lib.has_strategy():\n return True\n strategy = distribute_lib.get_strategy()\n return isinstance(strategy, (collective_all_reduce_strategy.CollectiveAllReduceStrategy, collective_all_reduce_strategy.CollectiveAllReduceStrategyV1, one_device_strategy.OneDeviceStrategy, one_device_strategy.OneDeviceStrategyV1, mirrored_strategy.MirroredStrategy, mirrored_strategy.MirroredStrategyV1))", + "docstring": "Returns True if the current Strategy supports loss scaling.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:strategy_supports_loss_scaling arguments If Call Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_RegularizedGramianCholesky", + "source_code": "def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind):\n gramian = math_ops.matmul(matrix, matrix, adjoint_a=first_kind, adjoint_b=not first_kind)\n if isinstance(l2_regularizer, tensor_lib.Tensor) or l2_regularizer != 0:\n matrix_shape = array_ops.shape(matrix)\n batch_shape = matrix_shape[:-2]\n if first_kind:\n small_dim = matrix_shape[-1]\n else:\n small_dim = matrix_shape[-2]\n identity = eye(small_dim, batch_shape=batch_shape, dtype=matrix.dtype)\n small_dim_static = matrix.shape[-1 if first_kind else -2]\n identity.set_shape(matrix.shape[:-2].concatenate([small_dim_static, small_dim_static]))\n gramian += l2_regularizer * identity\n return gen_linalg_ops.cholesky(gramian)", + "docstring": "Computes Cholesky factorization of regularized gramian matrix. Below we will use the following notation for each pair of matrix and right-hand sides in the batch: =\\\\(A \\in \\Re^{m \\times n}\\\\), =\\\\(C \\in \\Re^{\\min(m, n) \\times \\min(m,n)}\\\\), =\\\\(\\lambda\\\\). If is True, returns the Cholesky factorization \\\\(L\\\\) such that \\\\(L L^H = A^H A + \\lambda I\\\\). If is False, returns the Cholesky factorization \\\\(L\\\\) such that \\\\(L L^H = A A^H + \\lambda I\\\\). Args: matrix: of shape . l2_regularizer: 0-D . Ignored if . first_kind: bool. Controls what gramian matrix to factor. 
Returns: output: of shape whose inner-most 2 dimensions contain the Cholesky factors \\\\(L\\\\) described above.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py", + "ast_data": "FunctionDef name:_RegularizedGramianCholesky arg:matrix arg:l2_regularizer arg:first_kind arguments arg arg arg Assign Call If BoolOp Call Compare Assign Call Assign If Assign Assign Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "vorbis_window", + "source_code": "@tf_export('signal.vorbis_window')\n@dispatch.add_dispatch_support\ndef vorbis_window(window_length, dtype=dtypes.float32, name=None):\n with ops.name_scope(name, 'vorbis_window'):\n window_length = _check_params(window_length, dtype)\n arg = math_ops.cast(math_ops.range(window_length), dtype=dtype)\n window = math_ops.sin(np.pi / 2.0 * math_ops.pow(math_ops.sin(np.pi / math_ops.cast(window_length, dtype=dtype) * (arg + 0.5)), 2.0))\n return window", + "docstring": "Generate a [Vorbis power complementary window][vorbis]. Args: window_length: A scalar indicating the window length to generate. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A of shape of type . [vorbis]:", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\window_ops.py", + "ast_data": "FunctionDef name:vorbis_window arg:window_length arg:dtype arg:name arguments arg arg arg With Call Assign Call Assign Call Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "quaternion_to_axis_angle", + "source_code": "def quaternion_to_axis_angle(quaternion: Tensor) -> Tensor:\n if not torch.is_tensor(quaternion):\n raise TypeError(f'Input type is not a Tensor. Got {type(quaternion)}')\n if not quaternion.shape[-1] == 4:\n raise ValueError(f'Input must be a tensor of shape Nx4 or 4. Got {quaternion.shape}')\n q1: Tensor = tensor([])\n q2: Tensor = tensor([])\n q3: Tensor = tensor([])\n cos_theta: Tensor = tensor([])\n cos_theta = quaternion[..., 0]\n q1 = quaternion[..., 1]\n q2 = quaternion[..., 2]\n q3 = quaternion[..., 3]\n sin_squared_theta: Tensor = q1 * q1 + q2 * q2 + q3 * q3\n sin_theta: Tensor = torch.sqrt(sin_squared_theta)\n two_theta: Tensor = 2.0 * where(cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta))\n k_pos: Tensor = two_theta / sin_theta\n k_neg: Tensor = 2.0 * torch.ones_like(sin_theta)\n k: Tensor = where(sin_squared_theta > 0.0, k_pos, k_neg)\n axis_angle: Tensor = torch.zeros_like(quaternion)[..., :3]\n axis_angle[..., 0] += q1 * k\n axis_angle[..., 1] += q2 * k\n axis_angle[..., 2] += q3 * k\n return axis_angle", + "docstring": "Convert quaternion vector to axis angle of rotation in radians. The quaternion should be in (w, x, y, z) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion: tensor with quaternions. Return: tensor with axis angle of rotation. 
Shape: - Input: :math: where means, any number of dimensions - Output: :math: Example: >>> quaternion = tensor((1., 0., 0., 0.)) >>> quaternion_to_axis_angle(quaternion) tensor([0., 0., 0.])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:quaternion_to_axis_angle arg:quaternion arguments arg If Call Raise Call Call If Compare Raise Call Call Call Call Call Assign Assign Assign Assign Call Call Compare Call Call Call Call Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "metrics", + "source_code": "@property\ndef metrics(self):\n metrics = []\n if self._is_compiled:\n if self.compiled_loss is not None:\n metrics += self.compiled_loss.metrics\n if self.compiled_metrics is not None:\n metrics += self.compiled_metrics.metrics\n for l in self._flatten_layers():\n metrics.extend(l._metrics)\n return metrics", + "docstring": "Returns the model's metrics added using , APIs. Note: Metrics passed to are available only after a has been trained/evaluated on actual data. Examples: >>> inputs = tf.keras.layers.Input(shape=(3,)) >>> outputs = tf.keras.layers.Dense(2)(inputs) >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs) >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\"]) >>> [m.name for m in model.metrics] [] >>> x = np.random.random((2, 3)) >>> y = np.random.randint(0, 2, (2, 2)) >>> model.fit(x, y) >>> [m.name for m in model.metrics] ['loss', 'mae'] >>> inputs = tf.keras.layers.Input(shape=(3,)) >>> d = tf.keras.layers.Dense(2, name='out') >>> output_1 = d(inputs) >>> output_2 = d(inputs) >>> model = tf.keras.models.Model( ... inputs=inputs, outputs=[output_1, output_2]) >>> model.add_metric( ... tf.reduce_sum(output_2), name='mean', aggregation='mean') >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\", \"acc\"]) >>> model.fit(x, (y, y)) >>> [m.name for m in model.metrics] ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae', 'out_1_acc', 'mean']", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:metrics arg:self arguments arg Assign If If Compare If Compare For Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "assign_add", + "source_code": "def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n assign = state_ops.assign_add(self._variable, delta, use_locking=use_locking, name=name)\n if read_value:\n return assign\n return assign.op", + "docstring": "Adds a value to this variable. This is essentially a shortcut for . Args: delta: A . The value to add to this variable. use_locking: If , use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. 
Returns: A that will hold the new value of this variable after the addition has completed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:assign_add arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Assign Call If Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "NDArrayBackedExtensionBlock", + "source_code": "class NDArrayBackedExtensionBlock(EABackedBlock):\n values: NDArrayBackedExtensionArray\n\n @property\n def is_view(self) -> bool:\n return self.values._ndarray.base is not None", + "docstring": "Block backed by an NDArrayBackedExtensionArray", + "type": "class", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "ClassDef name:NDArrayBackedExtensionBlock FunctionDef name:is_view arg:self arguments arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "_disable_dynamo", + "source_code": "def _disable_dynamo(fn: Optional[Callable[_P, _T]]=None, recursive: bool=True) -> Union[Callable[_P, _T], Callable[[Callable[_P, _T]], Callable[_P, _T]]]:\n if fn is not None:\n\n @functools.wraps(fn)\n def inner(*args: _P.args, **kwargs: _P.kwargs) -> _T:\n disable_fn = getattr(fn, '__dynamo_disable', None)\n if disable_fn is None:\n import torch._dynamo\n disable_fn = torch._dynamo.disable(fn, recursive)\n fn.__dynamo_disable = disable_fn\n return disable_fn(*args, **kwargs)\n return inner\n else:\n return functools.partial(_disable_dynamo, recursive=recursive)", + "docstring": "This API should be only used inside torch, external users should still use torch._dynamo.disable. The main goal of this API is to avoid circular imports issues that is common while using _dynamo.disable inside torch itself. This API avoids it by lazily importing torch._dynamo from the import time to the invocation of the decorated function.", + "type": "function", + "file_path": "pytorch\\torch\\_compile.py", + "ast_data": "FunctionDef name:_disable_dynamo arg:fn arg:recursive arguments arg arg If Compare FunctionDef name:inner arguments arg arg Assign Call If Compare Assign Call Assign Return return:yes Call Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_verify_static_batch_size_equality", + "source_code": "def _verify_static_batch_size_equality(tensors, columns):\n expected_batch_size = None\n for i in range(0, len(tensors)):\n batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(tensors[i].shape[0]))\n if batch_size.value is not None:\n if expected_batch_size is None:\n bath_size_column_index = i\n expected_batch_size = batch_size\n elif not expected_batch_size.is_compatible_with(batch_size):\n raise ValueError('Batch size (first dimension) of each feature must be same. Batch size of columns ({}, {}): ({}, {})'.format(columns[bath_size_column_index].name, columns[i].name, expected_batch_size, batch_size))", + "docstring": "Verify equality between static batch sizes. Args: tensors: iterable of input tensors. columns: Corresponding feature columns. 
Raises: ValueError: in case of mismatched batch sizes.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:_verify_static_batch_size_equality arg:tensors arg:columns arguments arg arg Assign For Call Call Assign Call Call If Compare If Compare Assign Assign If Call Raise Call Call" + }, + { + "library": "kornia", + "name": "quaternion", + "source_code": "@property\ndef quaternion(self) -> Quaternion:\n return self._rotation.q", + "docstring": "Return the underlying rotation(Quaternion).", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", + "ast_data": "FunctionDef name:quaternion arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "tfdbg_run_id", + "source_code": "def tfdbg_run_id(self):\n return self._reader.tfdbg_run_id()", + "docstring": "Get the debugger run ID of the debugged TensorFlow program.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:tfdbg_run_id arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ensures", + "source_code": "def ensures(self, graph_module: GraphModule) -> None:\n pass", + "docstring": "This function will be called after the pass is run and will check that the given graph module contains the postconditions needed to run the pass. It is not required to implement this function. Args: graph_module: The graph module we will run checks on", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py", + "ast_data": "FunctionDef name:ensures arg:self arg:graph_module arguments arg arg" + }, + { + "library": "pytorch", + "name": "update_obs_for_equalization", + "source_code": "def update_obs_for_equalization(model: GraphModule, modules: dict[str, nn.Module]) -> dict[str, _WeightEqualizationObserver]:\n weight_eq_obs_dict = {}\n for node in model.graph.nodes:\n if node.op == 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver):\n input_eq_obs = modules[node.target]\n assert isinstance(input_eq_obs, _InputEqualizationObserver)\n op_node, weight_eq_obs = get_op_node_and_weight_eq_obs(node, model, modules)\n if op_node is None or weight_eq_obs is None:\n continue\n if op_node.op == 'call_module':\n if fused_module_supports_equalization(modules[str(op_node.target)]):\n module = modules[str(op_node.target)][0]\n assert nn_module_supports_equalization(module)\n weight_eq_obs(module.weight)\n else:\n weight_eq_obs(modules[str(op_node.target)].weight)\n equalization_scale = calculate_equalization_scale(input_eq_obs, weight_eq_obs)\n input_eq_obs.set_equalization_scale(equalization_scale)\n weight_eq_obs.set_equalization_scale(equalization_scale)\n weight_eq_obs_dict[op_node.name] = weight_eq_obs\n return weight_eq_obs_dict", + "docstring": "Update all of the observer's equalization scale. For each InputEqualizationObserver, we will find the location of the next WeightEqualizationObserver, create it, and calculate the equalization scale based on the two observers. 
We will then return a dictionary mapping operation node names to the corresponding WeightEqualizationObservers for that operation.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:update_obs_for_equalization arg:model arg:modules arguments arg arg Assign For If BoolOp Compare Call Assign Call Assign Call If BoolOp Compare Compare If Compare If Call Call Assign Call Call Call Call Call Assign Call Call Call Assign Return return:yes" + }, + { + "library": "django", + "name": "clone", + "source_code": "def clone(self):\n return SpatialReference(capi.clone_srs(self.ptr), axis_order=self.axis_order)", + "docstring": "Return a clone of this SpatialReference object.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", + "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "scrapy", + "name": "get_response", + "source_code": "def get_response(self) -> Deferred[Response]:\n return self._deferred_response", + "docstring": "Simply return a Deferred which fires when response from the asynchronous request is available", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\http2\\stream.py", + "ast_data": "FunctionDef name:get_response arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_create_expansion", + "source_code": "def _create_expansion(X, interaction_only, deg, n_features, cumulative_size=0):\n total_nnz = _calc_total_nnz(X.indptr, interaction_only, deg)\n expanded_col = _calc_expanded_nnz(n_features, interaction_only, deg)\n if expanded_col == 0:\n return None\n max_indices = expanded_col - 1\n max_indptr = total_nnz\n max_int32 = np.iinfo(np.int32).max\n needs_int64 = max(max_indices, max_indptr) > max_int32\n index_dtype = np.int64 if needs_int64 else np.int32\n expanded_data = np.empty(shape=total_nnz, dtype=X.data.dtype)\n expanded_indices = np.empty(shape=total_nnz, dtype=index_dtype)\n expanded_indptr = np.empty(shape=X.indptr.shape[0], dtype=index_dtype)\n _csr_polynomial_expansion(X.data, X.indices, X.indptr, X.shape[1], expanded_data, expanded_indices, expanded_indptr, interaction_only, deg)\n return sparse.csr_matrix((expanded_data, expanded_indices, expanded_indptr), shape=(X.indptr.shape[0] - 1, expanded_col), dtype=X.dtype)", + "docstring": "Helper function for creating and appending sparse expansion matrices", + "type": "function", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_polynomial.py", + "ast_data": "FunctionDef name:_create_expansion arg:X arg:interaction_only arg:deg arg:n_features arg:cumulative_size arguments arg arg arg arg arg Assign Call Assign Call If Compare Return return:no Assign Assign Assign Call Assign Compare Call Assign Assign Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "RemoteVariable", + "source_code": "class RemoteVariable(RemoteValueImpl):\n\n def get(self):\n self._wait_and_maybe_error()\n return self._copy_to_local()", + "docstring": "A RemoteValue that represents a mutable per-worker variable.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py", + "ast_data": "ClassDef name:RemoteVariable FunctionDef name:get arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "step", + "source_code": "def step(self, X0=None, T=None, N=None):\n return step(self, X0=X0, T=T, N=N)", + "docstring": "Return 
the step response of a continuous-time system. See for details.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:step arg:self arg:X0 arg:T arg:N arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_transform_uuid_to_ordinals", + "source_code": "def _transform_uuid_to_ordinals(candidates: list[str], uuids: list[str]) -> list[int]:\n\n def uuid_to_ordinal(candidate: str, uuids: list[str]) -> int:\n best_match = -1\n for idx, uuid in enumerate(uuids):\n if not uuid.startswith(candidate):\n continue\n if best_match != -1:\n return -1\n best_match = idx\n return best_match\n rc: list[int] = []\n for candidate in candidates:\n if torch.version.hip:\n candidate = candidate.replace('GPU-', '', 1)\n idx = uuid_to_ordinal(candidate, uuids)\n if idx < 0:\n break\n if idx in rc:\n return cast(list[int], [])\n rc.append(idx)\n return rc", + "docstring": "Given the set of partial uuids and list of known uuids builds a set of ordinals excluding ambiguous partials IDs.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:_transform_uuid_to_ordinals arg:candidates arg:uuids arguments arg arg FunctionDef name:uuid_to_ordinal arg:candidate arg:uuids arguments arg arg Assign For Call If Call If Compare Return return:yes Assign Return return:yes For If Assign Call Assign Call If Compare If Compare Return return:yes Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, input_tensor: Tensor) -> Tensor:\n return apply_colormap(input_tensor, self.colormap)", + "docstring": "Apply the colormap to the input tensor. Args: input_tensor: The input tensor representing the grayscale image. .. note:: The input tensor must be integer values in the range of [0-255] or float values in the range of [0-1]. Returns: The output tensor representing the image with the applied colormap.", + "type": "method", + "file_path": "kornia\\kornia\\color\\colormap.py", + "ast_data": "FunctionDef name:forward arg:self arg:input_tensor arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "map_fn_v2", + "source_code": "@tf_export('map_fn', v1=[])\n@deprecation.deprecated_arg_values(None, 'back_prop=False is deprecated. 
Consider using tf.stop_gradient instead.\\nInstead of:\\nresults = tf.map_fn(fn, elems, back_prop=False)\\nUse:\\nresults = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))', warn_once=True, back_prop=False)\n@deprecation.deprecated_args(None, 'Use fn_output_signature instead', 'dtype')\ndef map_fn_v2(fn, elems, dtype=None, parallel_iterations=None, back_prop=True, swap_memory=False, infer_shape=True, name=None, fn_output_signature=None):\n if fn_output_signature is None:\n fn_output_signature = dtype\n return map_fn(fn=fn, elems=elems, fn_output_signature=fn_output_signature, parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory, infer_shape=infer_shape, name=name)", + "docstring": "Transform by applying to each element unstacked on axis 0.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\map_fn.py", + "ast_data": "FunctionDef name:map_fn_v2 arg:fn arg:elems arg:dtype arg:parallel_iterations arg:back_prop arg:swap_memory arg:infer_shape arg:name arg:fn_output_signature arguments arg arg arg arg arg arg arg arg arg If Compare Assign Return return:yes Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y, sample_weight=None):\n raw_prediction = self._linear_predictor(X)\n y = check_array(y, dtype=raw_prediction.dtype, order='C', ensure_2d=False)\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)\n base_loss = self._base_loss\n if not base_loss.in_y_true_range(y):\n raise ValueError(f'Some value(s) of y are out of the valid range of the loss {base_loss.__name__}.')\n constant = np.average(base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None), weights=sample_weight)\n deviance = base_loss(y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1)\n y_mean = base_loss.link.link(np.average(y, weights=sample_weight))\n deviance_null = base_loss(y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1)\n return 1 - (deviance + constant) / (deviance_null + constant)", + "docstring": "Compute D^2, the percentage of deviance explained. D^2 is a generalization of the coefficient of determination R^2. R^2 uses squared error and D^2 uses the deviance of this GLM, see the :ref:. D^2 is defined as :math:, :math: is the null deviance, i.e. the deviance of a model with intercept alone, which corresponds to :math:. The mean :math: is averaged by sample_weight. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) True values of target. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float D^2 of self.predict(X) w.r.t. 
y.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\glm.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign If Call Raise Call Assign Call Call Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "endswith", + "source_code": "def endswith(self, suffix, start=0, end=None):\n return endswith(self, suffix, start, end)", + "docstring": "Returns a boolean array which is where the string element in ends with , otherwise . See Also -------- char.endswith", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:endswith arg:self arg:suffix arg:start arg:end arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "busday_count", + "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)\ndef busday_count(begindates, enddates, weekmask=None, holidays=None, busdaycal=None, out=None):\n return (begindates, enddates, weekmask, holidays, out)", + "docstring": "busday_count( begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None ) Counts the number of valid days between and , not including the day of . If `busdaycalendar` together, containing the number of valid days between the begin and end dates. See Also -------- busdaycalendar : An object that specifies a custom set of valid days. is_busday : Returns a boolean array indicating valid days. busday_offset : Applies an offset counted in valid days. Examples -------- >>> import numpy as np >>> # Number of weekdays in January 2011 ... np.busday_count('2011-01', '2011-02') 21 >>> # Number of weekdays in 2011 >>> np.busday_count('2011', '2012') 260 >>> # Number of Saturdays in 2011 ... 
np.busday_count('2011', '2012', weekmask='Sat') 53", + "type": "function", + "file_path": "numpy\\numpy\\_core\\multiarray.py", + "ast_data": "FunctionDef name:busday_count arg:begindates arg:enddates arg:weekmask arg:holidays arg:busdaycal arg:out arguments arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "pid", + "source_code": "@property\ndef pid(self) -> int:\n return self._pid", + "docstring": "ID of the process which created this tensor (an integer).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", + "ast_data": "FunctionDef name:pid arg:self arguments arg Return return:yes" + }, + { + "library": "scrapy", + "name": "connectFailed", + "source_code": "def connectFailed(self, reason: Failure) -> None:\n self._tunnelReadyDeferred.errback(reason)", + "docstring": "Propagates the errback to the appropriate deferred.", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http11.py", + "ast_data": "FunctionDef name:connectFailed arg:self arg:reason arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "filter_ciflow_tags", + "source_code": "def filter_ciflow_tags(tags: set[str]) -> list[str]:\n return sorted((tag[:-2] for tag in tags if tag.startswith('ciflow/') and tag.endswith('/*')))", + "docstring": "Return sorted list of ciflow tags", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\collect_ciflow_labels.py", + "ast_data": "FunctionDef name:filter_ciflow_tags arg:tags arguments arg Return return:yes Call BoolOp Call Call" + }, + { + "library": "scipy", + "name": "erf_zeros", + "source_code": "def erf_zeros(nt):\n if floor(nt) != nt or nt <= 0 or (not isscalar(nt)):\n raise ValueError('Argument must be positive scalar integer.')\n return _specfun.cerzo(nt)", + "docstring": "Compute the first nt zero in the first quadrant, ordered by absolute value. Zeros in the other quadrants can be obtained by using the symmetries erf(-z) = erf(z) and erf(conj(z)) = conj(erf(z)). Parameters ---------- nt : int The number of zeros to compute Returns ------- The locations of the zeros of erf : ndarray (complex) Complex values at which zeros of erf(z) References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996. 
Examples -------- >>> from scipy import special >>> special.erf_zeros(1) array([1.45061616+1.880943j]) Check that erf is (close to) zero for the value returned by erf_zeros >>> special.erf(special.erf_zeros(1)) array([4.95159469e-14-1.16407394e-16j])", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:erf_zeros arg:nt arguments arg If BoolOp Compare Call Compare Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_fix_chunks", + "source_code": "def _fix_chunks(self) -> None:\n if not self._fix_chunk_order:\n return\n chunk_indices = {id(chunk): i for i, chunk in enumerate(self._chunks)}\n to_fix = [self._chunked_message]\n while to_fix:\n for field in to_fix.pop().chunked_fields:\n if field.message.chunked_fields:\n to_fix.append(field.message)\n if not field.message.HasField('chunk_index'):\n continue\n chunk_addr = self._add_chunk_order[field.message.chunk_index]\n assert chunk_addr in chunk_indices, f'Found unexpected chunk {chunk_addr}'\n new_chunk_index = chunk_indices[chunk_addr]\n field.message.chunk_index = new_chunk_index\n self._add_chunk_order = [id(chunk) for chunk in self._chunks]\n self._fix_chunk_order = False", + "docstring": "Fixes chunk indices in the ChunkedMessage.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py", + "ast_data": "FunctionDef name:_fix_chunks arg:self arguments arg If Return return:no Assign Call Call Assign While For Call If Call If Call Assign Compare Assign Assign Assign Call Assign" + }, + { + "library": "django", + "name": "get_form_class", + "source_code": "def get_form_class(self):\n if self.fields is not None and self.form_class:\n raise ImproperlyConfigured(\"Specifying both 'fields' and 'form_class' is not permitted.\")\n if self.form_class:\n return self.form_class\n else:\n if self.model is not None:\n model = self.model\n elif getattr(self, 'object', None) is not None:\n model = self.object.__class__\n else:\n model = self.get_queryset().model\n if self.fields is None:\n raise ImproperlyConfigured(\"Using ModelFormMixin (base class of %s) without the 'fields' attribute is prohibited.\" % self.__class__.__name__)\n return model_forms.modelform_factory(model, fields=self.fields)", + "docstring": "Return the form class to use in this view.", + "type": "method", + "file_path": "django\\django\\views\\generic\\edit.py", + "ast_data": "FunctionDef name:get_form_class arg:self arguments arg If BoolOp Compare Raise Call If Return return:yes If Compare Assign If Compare Call Assign Assign Call If Compare Raise Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "__repeat_param_across_channels__", + "source_code": "def __repeat_param_across_channels__(self, param: Tensor, frame_num: int) -> Tensor:\n repeated = param[:, None, ...].repeat(1, frame_num, *[1] * len(param.shape[1:]))\n return repeated.reshape(-1, *list(param.shape[1:]))", + "docstring": "Repeat parameters across channels. The input is shaped as (B, ...), while to output (B * same_on_frame, ...), which to guarantee that the same transformation would happen for each frame. (B1, B2, ..., Bn) => (B1, ... 
B1, B2, ..., B2, ..., Bn, ..., Bn) | ch_size | | ch_size | ..., | ch_size |", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\video.py", + "ast_data": "FunctionDef name:__repeat_param_across_channels__ arg:self arg:param arg:frame_num arguments arg arg arg Assign Call Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "std_call", + "source_code": "def std_call(func):\n if os.name == 'nt':\n return lwingdal[func]\n else:\n return lgdal[func]", + "docstring": "Return the correct STDCALL function for certain OSR routines on Win32 platforms.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\gdal\\libgdal.py", + "ast_data": "FunctionDef name:std_call arg:func arguments arg If Compare Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "printoptions", + "source_code": "@set_module('numpy')\n@contextlib.contextmanager\ndef printoptions(*args, **kwargs):\n token = _set_printoptions(*args, **kwargs)\n try:\n yield get_printoptions()\n finally:\n format_options.reset(token)", + "docstring": "Context manager for setting print options. Set print options for the scope of the block, and restore the old options at the end. See for the full description of available options. Examples -------- >>> import numpy as np >>> from numpy.testing import assert_equal >>> with np.printoptions(precision=2): ... np.array([2.0]) / 3 array([0.67]) The -clause of the -statement gives the current print options: >>> with np.printoptions(precision=2) as opts: ... assert_equal(opts, np.get_printoptions()) See Also -------- set_printoptions, get_printoptions", + "type": "function", + "file_path": "numpy\\numpy\\_core\\arrayprint.py", + "ast_data": "FunctionDef name:printoptions arguments arg arg Assign Call Try Call Call Call" + }, + { + "library": "pytorch", + "name": "_get_win_folder_from_registry", + "source_code": "def _get_win_folder_from_registry(csidl_name):\n import winreg as _winreg\n shell_folder_name = {'CSIDL_APPDATA': 'AppData', 'CSIDL_COMMON_APPDATA': 'Common AppData', 'CSIDL_LOCAL_APPDATA': 'Local AppData'}[csidl_name]\n key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Explorer\\\\Shell Folders')\n dir, _type = _winreg.QueryValueEx(key, shell_folder_name)\n return dir", + "docstring": "This is a fallback technique at best. 
I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names.", + "type": "function", + "file_path": "pytorch\\torch\\_appdirs.py", + "ast_data": "FunctionDef name:_get_win_folder_from_registry arg:csidl_name arguments arg Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "_DomainGreaterEqual", + "source_code": "class _DomainGreaterEqual:\n\n def __init__(self, critical_value):\n self.critical_value = critical_value\n\n def __call__(self, x):\n with np.errstate(invalid='ignore'):\n return umath.less(x, self.critical_value)", + "docstring": "DomainGreaterEqual(v)(x) is True where x < v.", + "type": "class", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "ClassDef name:_DomainGreaterEqual FunctionDef name:__init__ arg:self arg:critical_value arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "transform_keypoints_", + "source_code": "def transform_keypoints_(self, M: Tensor) -> 'Keypoints':\n return self.transform_keypoints(M, inplace=True)", + "docstring": "Inplace version of :func:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\keypoints.py", + "ast_data": "FunctionDef name:transform_keypoints_ arg:self arg:M arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_rename_param_and_buffer", + "source_code": "def _rename_param_and_buffer(self, nodes: Sequence[torch.fx.Node], new_name: str) -> None:\n assert len(nodes) > 0, '`nodes` cannot be empty'\n assert len({node.target for node in nodes}) == 1, '`nodes` must all have same `target`'\n old_name = nodes[0].target\n assert isinstance(old_name, str), f'Expected str, got type({old_name})'\n normalized_name = new_name.replace('.', '/')\n attr_value = getattr(self.module, old_name)\n setattr(self.module, normalized_name, attr_value)\n delattr(self.module, old_name)\n for node in nodes:\n with self.module.graph.inserting_before(node):\n new_node = self.module.graph.get_attr(normalized_name)\n new_node.meta = node.meta\n node.replace_all_uses_with(new_node)\n self.module.graph.erase_node(node)\n logger.info(\"Renamed 'self.%s' to 'self.%s', normalized from original parameter name '%s'.\", old_name, normalized_name, new_name)", + "docstring": "Rename the parameter/buffer and replace corresponding nodes with new nodes of updated target.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\readability.py", + "ast_data": "FunctionDef name:_rename_param_and_buffer arg:self arg:nodes arg:new_name arguments arg arg arg Compare Call Compare Call Assign Call Assign Call Assign Call Call Call For With Call Assign Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "_gcd_import", + "source_code": "def _gcd_import(self, name, package=None, level=0):\n _sanity_check(name, package, level)\n if level > 0:\n name = _resolve_name(name, package, level)\n return self._find_and_load(name)", + "docstring": "Import and return the module based on its name, the package the call is being made from, and the level adjustment. This function represents the greatest common denominator of functionality between import_module and __import__. 
This includes setting __package__ if the loader did not.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_importer.py", + "ast_data": "FunctionDef name:_gcd_import arg:self arg:name arg:package arg:level arguments arg arg arg arg Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "trailing_slash", + "source_code": "def trailing_slash(missing=True, extra=False, status=None, debug=False):\n request = cherrypy.serving.request\n pi = request.path_info\n if debug:\n cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' % (request.is_index, missing, extra, pi), 'TOOLS.TRAILING_SLASH')\n if request.is_index is True:\n if missing:\n if not pi.endswith('/'):\n new_url = cherrypy.url(pi + '/', request.query_string)\n raise cherrypy.HTTPRedirect(new_url, status=status or 301)\n elif request.is_index is False:\n if extra:\n if pi.endswith('/') and pi != '/':\n new_url = cherrypy.url(pi[:-1], request.query_string)\n raise cherrypy.HTTPRedirect(new_url, status=status or 301)", + "docstring": "Redirect if path_info has (missing|extra) trailing slash.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", + "ast_data": "FunctionDef name:trailing_slash arg:missing arg:extra arg:status arg:debug arguments arg arg arg arg Assign Assign If Call If Compare If If Call Assign Call Raise Call BoolOp If Compare If If BoolOp Call Compare Assign Call Raise Call BoolOp" + }, + { + "library": "cryptography", + "name": "public_bytes", + "source_code": "def public_bytes(self) -> bytes:\n raise NotImplementedError(f'public_bytes is not implemented for extension type {self!r}')", + "docstring": "Serializes the extension type to DER.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\x509\\extensions.py", + "ast_data": "FunctionDef name:public_bytes arg:self arguments arg Raise Call" + }, + { + "library": "matplotlib", + "name": "_set_artist_props", + "source_code": "def _set_artist_props(self, a):\n a.set_figure(self.get_figure(root=False))\n if not a.is_transform_set():\n a.set_transform(self.transData)\n a.axes = self\n if a.get_mouseover():\n self._mouseover_set.add(a)", + "docstring": "Set the boilerplate props for artists added to Axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:_set_artist_props arg:self arg:a arguments arg arg Call Call If Call Call Assign If Call Call" + }, + { + "library": "sphinx", + "name": "isbuiltin", + "source_code": "def isbuiltin(obj: Any) -> TypeIs[types.BuiltinFunctionType]:\n return inspect.isbuiltin(unpartial(obj))", + "docstring": "Check if the object is a built-in function or method. Partial objects are unwrapped before checking them. .. 
seealso:: :external+python:func:", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:isbuiltin arg:obj arguments arg Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "_not_left_axes", + "source_code": "@property\ndef _not_left_axes(self):\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat", + "docstring": "Return a flat array of axes that aren't on the left column.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:_not_left_axes arg:self arguments arg If Compare Return return:yes Assign For Call If Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "experimental_as_proto", + "source_code": "def experimental_as_proto(self) -> types_pb2.SerializedDType:\n return types_pb2.SerializedDType(datatype=self._type_enum)", + "docstring": "Returns a proto representation of the Dtype instance.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", + "ast_data": "FunctionDef name:experimental_as_proto arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "partition_outer_dimension", + "source_code": "def partition_outer_dimension(self, row_partition):\n if not isinstance(row_partition, RowPartition):\n raise TypeError('row_partition must be a RowPartition.')\n if self.shape.rank == 0:\n raise ValueError('Shape %s must have rank at least 1' % self.shape)\n return _partition_outer_dimension(self, row_partition)", + "docstring": "Partitions the outer dimension of this StructuredTensor. Returns a new with the same values as , where the outer dimension is partitioned into two (possibly ragged) dimensions. Requires that this StructuredTensor have an outer dimension (i.e., ). >>> st = tf.experimental.StructuredTensor.from_pyval( ... [{'foo': 12}, {'foo': 33}, {'foo': 99}]) >>> partition = RowPartition.from_row_lengths([2, 0, 1]) >>> st.partition_outer_dimension(partition) }, shape=(3, None))> Args: row_partition: A . Returns: A with rank .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py", + "ast_data": "FunctionDef name:partition_outer_dimension arg:self arg:row_partition arguments arg arg If Call Raise Call If Compare Raise Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "bbox_to_mask", + "source_code": "def bbox_to_mask(boxes: torch.Tensor, width: int, height: int) -> torch.Tensor:\n validate_bbox(boxes)\n mask = zeros((len(boxes), height + 2, width + 2), dtype=boxes.dtype, device=boxes.device)\n box_i = (boxes + 1).long()\n for msk, bx in zip(mask, box_i):\n msk[bx[0, 1]:bx[2, 1] + 1, bx[0, 0]:bx[1, 0] + 1] = 1.0\n return mask[:, 1:-1, 1:-1]", + "docstring": "Convert 2D bounding boxes to masks. Covered area is 1. and the remaining is 0. Args: boxes: a tensor containing the coordinates of the bounding boxes to be extracted. The tensor must have the shape of Bx4x2, where each box is defined in the following `` order: top-left, top-right, bottom-right and bottom-left. The coordinates must be in the x, y order. width: width of the masked image. height: height of the masked image. Returns: the output mask tensor. Note: It is currently non-differentiable. Examples: >>> boxes = torch.tensor([[ ... [1., 1.], ... [3., 1.], ... [3., 2.], ... [1., 2.], ... 
]]) # 1x4x2 >>> bbox_to_mask(boxes, 5, 5) tensor([[[0., 0., 0., 0., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]]])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\bbox.py", + "ast_data": "FunctionDef name:bbox_to_mask arg:boxes arg:width arg:height arguments arg arg arg Call Assign Call Call Assign Call For Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_check_orig_params_flattened", + "source_code": "def _check_orig_params_flattened(fsdp_module, ignored_params: set[nn.Parameter]) -> None:\n for param_name, param in _named_parameters_with_duplicates(fsdp_module):\n if param not in ignored_params and (not _is_fsdp_flattened(param)):\n raise RuntimeError(f'Found an unflattened parameter: {param_name}; {param.size()} {param.__class__}')", + "docstring": "Check that original parameters in ``. This should be called as a sanity check after flattening the wrapped module's parameters.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py", + "ast_data": "FunctionDef name:_check_orig_params_flattened arg:fsdp_module arg:ignored_params arguments arg arg For Call If BoolOp Compare Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "experimental_require_static_shapes", + "source_code": "@property\ndef experimental_require_static_shapes(self):\n return self._require_static_shapes", + "docstring": "Returns if static shape is required; otherwise.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:experimental_require_static_shapes arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "TypePromotionSnapshot", + "source_code": "@dataclasses.dataclass\nclass TypePromotionSnapshot:\n args_dtypes: Mapping[int, torch.dtype]\n 'Mapping from arg position to dtype to promote to.'\n kwargs_dtypes: Mapping[str, torch.dtype]\n 'Mapping from kwarg name to dtype to promote to.'\n out_dtype: torch.dtype\n 'Expected output dtype of the node.'", + "docstring": "Type promotion snapshot for a fx node and its inputs. Contains the promoted dtype for args and kwargs that needs promoting. 
Contains the expected node output dtype.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py", + "ast_data": "ClassDef name:TypePromotionSnapshot" + }, + { + "library": "tensorflow", + "name": "get_config", + "source_code": "def get_config(self):\n config = dict(zip(self._fields, self))\n config['dtype'] = self.dtype.name\n return config", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py", + "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "sym_max", + "source_code": "def sym_max(a, b):\n if overrides.has_torch_function((a, b)):\n return overrides.handle_torch_function(sym_max, (a, b), a, b)\n if isinstance(a, (SymInt, SymFloat)):\n return a.__sym_max__(b)\n elif isinstance(b, (SymInt, SymFloat)):\n return b.__sym_max__(a)\n all_types, float_types = __all_and_float_types()\n assert isinstance(a, all_types), type(a)\n assert isinstance(b, all_types), type(b)\n if isinstance(a, float_types) or isinstance(b, float_types):\n return builtins.float(builtins.max(a, b))\n else:\n return builtins.max(a, b)", + "docstring": "SymInt-aware utility for max which avoids branching on a < b. Unlike builtins.max(), this only works for int/float, and it always promotes to float if any argument is float (unlike builtins.max, which will faithfully preserve the type of the input argument).", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:sym_max arg:a arg:b arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Assign Call Call Call Call Call If BoolOp Call Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "float", + "source_code": "def float(self) -> Self:\n return self._apply(lambda t: t.float() if t.is_floating_point() else t)", + "docstring": "Casts all floating point parameters and buffers to `` datatype. .. note:: This method modifies the module in-place. Returns: Module: self", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:float arg:self arguments arg Return return:yes Call arguments arg Call Call" + }, + { + "library": "tensorflow", + "name": "_get_stats", + "source_code": "def _get_stats(self):\n return _pywrap_dtensor_device.GetStats(context.context()._handle, self._device_info)", + "docstring": "Returns the number of cache hit and miss for function compilation. Returns: A dictionary. 
'miss': number of cache misses; 'hit': number of cache hits; and 'size': size of cache; miss count.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py", + "ast_data": "FunctionDef name:_get_stats arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_row_partitions_for_ragged_tensor", + "source_code": "def _row_partitions_for_ragged_tensor(value, rank, dtype):\n assert rank > 1\n value_row_partitions = value._nested_row_partitions[:rank - 1]\n if len(value_row_partitions) < rank - 1:\n value_row_partitions += _row_partitions_for_tensor(value.flat_values, rank - len(value_row_partitions), dtype)\n assert len(value_row_partitions) == rank - 1\n return value_row_partitions", + "docstring": "Returns the row partitions for a tf.RaggedTensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py", + "ast_data": "FunctionDef name:_row_partitions_for_ragged_tensor arg:value arg:rank arg:dtype arguments arg arg arg Compare Assign If Compare Call Call Call Compare Call Return return:yes" + }, + { + "library": "authlib", + "name": "validate_request_object_signing_alg", + "source_code": "def validate_request_object_signing_alg(self):\n self._validate_claim_value('request_object_signing_alg')", + "docstring": "JWS [JWS] alg algorithm [JWA] that MUST be used for signing Request Objects sent to the OP. All Request Objects from this Client MUST be rejected, if not signed with this algorithm. Request Objects are described in Section 6.1 of OpenID Connect Core 1.0 [OpenID.Core]. This algorithm MUST be used both when the Request Object is passed by value (using the request parameter) and when it is passed by reference (using the request_uri parameter). Servers SHOULD support RS256. The value none MAY be used. The default, if omitted, is that any algorithm supported by the OP and the RP MAY be used.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", + "ast_data": "FunctionDef name:validate_request_object_signing_alg arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "set_dir", + "source_code": "def set_dir(d: Union[str, os.PathLike]) -> None:\n global _hub_dir\n _hub_dir = os.path.expanduser(d)", + "docstring": "Optionally set the Torch Hub directory used to save downloaded models & weights. Args: d (str): path to a local folder to save downloaded models & weights.", + "type": "function", + "file_path": "pytorch\\torch\\hub.py", + "ast_data": "FunctionDef name:set_dir arg:d arguments arg Assign Call" + }, + { + "library": "pandas", + "name": "_wrap_aggregated_output", + "source_code": "@final\ndef _wrap_aggregated_output(self, result: Series | DataFrame, qs: npt.NDArray[np.float64] | None=None):\n if not self.as_index:\n result = self._insert_inaxis_grouper(result, qs=qs)\n result = result._consolidate()\n result.index = default_index(len(result))\n else:\n index = self._grouper.result_index\n if qs is not None:\n index = _insert_quantile_level(index, qs)\n result.index = index\n return result", + "docstring": "Wraps the output of GroupBy aggregations into the expected result. 
Parameters ---------- result : Series, DataFrame Returns ------- Series or DataFrame", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:_wrap_aggregated_output arg:self arg:result arg:qs arguments arg arg arg If Assign Call Assign Call Assign Call Call Assign If Compare Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get_device_partition_stats", + "source_code": "def get_device_partition_stats(partitions: list[Partition], devices: list[Device]) -> tuple[dict[Device, list[Partition]], dict[Device, int], list[Partition]]:\n logical_id_to_device = get_logical_id_to_device(devices)\n device_to_partitions: dict[Device, list[Partition]] = {}\n device_to_left_mem_bytes: dict[Device, int] = {}\n for d in devices:\n device_to_partitions[d] = []\n device_to_left_mem_bytes[d] = d.available_mem_bytes\n no_device_partitions = []\n for partition in partitions:\n if partition.logical_device_ids != []:\n for logical_id in partition.logical_device_ids:\n device = logical_id_to_device[logical_id]\n device_to_partitions[device].append(partition)\n device_to_left_mem_bytes[device] -= partition.used_mem_bytes\n else:\n no_device_partitions.append(partition)\n return (device_to_partitions, device_to_left_mem_bytes, no_device_partitions)", + "docstring": "Given a list of partitions and a list of devices, returns: 1. A mapping from device to partitions on it; 2. A mapping from device to its remaining memory size; 3. A list of partitions that do not have a device.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", + "ast_data": "FunctionDef name:get_device_partition_stats arg:partitions arg:devices arguments arg arg Assign Call For Assign Assign Assign For If Compare For Assign Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "__next__", + "source_code": "def __next__(self):\n return next(self.iter_response)", + "docstring": "Iterate over the app response.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpwsgi.py", + "ast_data": "FunctionDef name:__next__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "_define_support_grid", + "source_code": "def _define_support_grid(self, x, bw, cut, clip, gridsize):\n clip_lo = -np.inf if clip[0] is None else clip[0]\n clip_hi = +np.inf if clip[1] is None else clip[1]\n gridmin = max(x.min() - bw * cut, clip_lo)\n gridmax = min(x.max() + bw * cut, clip_hi)\n return np.linspace(gridmin, gridmax, gridsize)", + "docstring": "Create the grid of evaluation points depending for vector x.", + "type": "method", + "file_path": "seaborn\\seaborn\\_statistics.py", + "ast_data": "FunctionDef name:_define_support_grid arg:self arg:x arg:bw arg:cut arg:clip arg:gridsize arguments arg arg arg arg arg arg Assign Compare Assign Compare Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_check_set_wise_labels", + "source_code": "def _check_set_wise_labels(y_true, y_pred, average, labels, pos_label):\n average_options = (None, 'micro', 'macro', 'weighted', 'samples')\n if average not in average_options and average != 'binary':\n raise ValueError('average has to be one of ' + str(average_options))\n y_true, y_pred = attach_unique(y_true, y_pred)\n y_type, y_true, y_pred = _check_targets(y_true, y_pred)\n present_labels = _tolist(unique_labels(y_true, y_pred))\n if average == 'binary':\n if y_type == 'binary':\n if 
pos_label not in present_labels:\n if len(present_labels) >= 2:\n raise ValueError(f'pos_label={pos_label} is not a valid label. It should be one of {present_labels}')\n labels = [pos_label]\n else:\n average_options = list(average_options)\n if y_type == 'multiclass':\n average_options.remove('samples')\n raise ValueError(\"Target is %s but average='binary'. Please choose another average setting, one of %r.\" % (y_type, average_options))\n elif pos_label not in (None, 1):\n warnings.warn(\"Note that pos_label (set to %r) is ignored when average != 'binary' (got %r). You may use labels=[pos_label] to specify a single positive class.\" % (pos_label, average), UserWarning)\n return labels", + "docstring": "Validation associated with set-wise metrics. Returns identified labels.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\_classification.py", + "ast_data": "FunctionDef name:_check_set_wise_labels arg:y_true arg:y_pred arg:average arg:labels arg:pos_label arguments arg arg arg arg arg Assign If BoolOp Compare Compare Raise Call Call Assign Call Assign Call Assign Call Call If Compare If Compare If Compare If Compare Call Raise Call Assign Assign Call If Compare Call Raise Call If Compare Call Return return:yes" + }, + { + "library": "scipy", + "name": "random_state", + "source_code": "@property\ndef random_state(self):\n return self._random_state", + "docstring": "Get or set the Generator object for generating random variates. If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:random_state arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_add_unique_metric_name", + "source_code": "def _add_unique_metric_name(self, metric_name, metric_fn, output_index):\n if len(self.output_names) > 1:\n if not getattr(metric_fn, '_from_serialized', False):\n metric_name = '%s_%s' % (self.output_names[output_index], metric_name)\n j = 1\n base_metric_name = metric_name\n while metric_name in self.metrics_names:\n metric_name = '%s_%d' % (base_metric_name, j)\n j += 1\n return metric_name", + "docstring": "Makes the metric name unique. If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. Args: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. metric_fn: The Metric object. output_index: The index of the model output for which the metric name is being added. 
Returns: string, name of the model's unique metric name", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:_add_unique_metric_name arg:self arg:metric_name arg:metric_fn arg:output_index arguments arg arg arg arg If Compare Call If Call Assign Assign Assign While Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "build", + "source_code": "def build(self):\n if self._built:\n return\n self._variables = self._create_variables_and_slots()\n self._track_restore_info_for_cpu()\n self._built = True", + "docstring": "Create variables and slots variables for TPU embeddings.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:build arg:self arguments arg If Return return:no Assign Call Call Assign" + }, + { + "library": "django", + "name": "get_dated_items", + "source_code": "def get_dated_items(self):\n qs = self.get_dated_queryset()\n date_list = self.get_date_list(qs, ordering='DESC')\n if not date_list:\n qs = qs.none()\n return (date_list, qs, {})", + "docstring": "Return (date_list, items, extra_context) for this request.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:get_dated_items arg:self arguments arg Assign Call Assign Call If Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "devices", + "source_code": "def devices(self):\n return [cuda.Device(i) for i in range(cuda.runtime.getDeviceCount())]", + "docstring": "The devices supported by CuPy. Returns ------- devices : list[Device] The devices supported by CuPy. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_info.py", + "ast_data": "FunctionDef name:devices arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "hermgrid2d", + "source_code": "def hermgrid2d(x, y, c):\n return pu._gridnd(hermval, c, x, y)", + "docstring": "Evaluate a 2-D Hermite series on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) where the points `axbyxyxyxyccxyxycxy`. See Also -------- hermval, hermval2d, hermval3d, hermgrid3d Examples -------- >>> from numpy.polynomial.hermite import hermgrid2d >>> x = [1, 2, 3] >>> y = [4, 5] >>> c = [[1, 2, 3], [4, 5, 6]] >>> hermgrid2d(x, y, c) array([[1035., 1599.], [1867., 2883.], [2699., 4167.]])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite.py", + "ast_data": "FunctionDef name:hermgrid2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "create", + "source_code": "@abc.abstractmethod\ndef create(self, batch_outs):\n raise NotImplementedError('Must be implemented in subclasses.')", + "docstring": "Creates the initial results from the first batch outputs. 
Args: batch_outs: A list of batch-level outputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:create arg:self arg:batch_outs arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "NotFoundError", + "source_code": "@tf_export('errors.NotFoundError')\nclass NotFoundError(OpError):\n\n def __init__(self, node_def, op, message, *args):\n super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND, *args)", + "docstring": "Raised when a requested entity (e.g., a file or directory) was not found. For example, running the operation could raise if it receives the name of a file that does not exist.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "ClassDef name:NotFoundError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call" + }, + { + "library": "pandas", + "name": "next_monday", + "source_code": "def next_monday(dt: datetime) -> datetime:\n if dt.weekday() == 5:\n return dt + timedelta(2)\n elif dt.weekday() == 6:\n return dt + timedelta(1)\n return dt", + "docstring": "If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday, use Monday instead", + "type": "function", + "file_path": "pandas\\pandas\\tseries\\holiday.py", + "ast_data": "FunctionDef name:next_monday arg:dt arguments arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_graphable_type", + "source_code": "def is_graphable_type(typ) -> bool:\n return issubclass(typ, torch.fx.node.base_types)", + "docstring": "Return whether the given type is graphable", + "type": "function", + "file_path": "pytorch\\torch\\_higher_order_ops\\flat_apply.py", + "ast_data": "FunctionDef name:is_graphable_type arg:typ arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "__finalize__", + "source_code": "@final\ndef __finalize__(self, other, method: str | None=None, **kwargs) -> Self:\n if isinstance(other, NDFrame):\n if other.attrs:\n self.attrs = deepcopy(other.attrs)\n self.flags.allows_duplicate_labels = self.flags.allows_duplicate_labels and other.flags.allows_duplicate_labels\n for name in set(self._metadata) & set(other._metadata):\n assert isinstance(name, str)\n object.__setattr__(self, name, getattr(other, name, None))\n if method == 'concat':\n objs = other.objs\n if all((bool(obj.attrs) for obj in objs)):\n attrs = objs[0].attrs\n have_same_attrs = all((obj.attrs == attrs for obj in objs[1:]))\n if have_same_attrs:\n self.attrs = deepcopy(attrs)\n allows_duplicate_labels = all((x.flags.allows_duplicate_labels for x in objs))\n self.flags.allows_duplicate_labels = allows_duplicate_labels\n return self", + "docstring": "Propagate metadata from other to self. 
Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where `method` are not currently considered stable across pandas releases.", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:__finalize__ arg:self arg:other arg:method arguments arg arg arg arg If Call If Assign Call Assign BoolOp For Call Call Call Call Call If Compare Assign If Call Call Assign Assign Call Compare If Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "index", + "source_code": "@cherrypy.expose\ndef index(self):\n return 'We have an
    important message for you!'", + "docstring": "Produce HTTP response body of hello world app index URI.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut02_expose_methods.py", + "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "num_shards_map", + "source_code": "@property\ndef num_shards_map(self) -> list[int]:\n r = [1] * self.ndim\n for i, placement in enumerate(self.placements):\n if placement.is_shard():\n shard_dim = cast(Shard, placement).dim\n r[shard_dim] *= self.mesh.size(i)\n return r", + "docstring": "dim_map is a property we derive from of the distributed tensor. Unlike , denotes how many shards each tensor dim has. Like : len(num_shards_map) == dist_tensor.ndim num_shards_map[i] = 1: means tensor dim i is not sharded num_shards_map[i] = j: means tensor dim i has j shards in total For example, we have a dist tensor of shape [18, 20, 30], a device_mesh ([[0, 1, 2, 3], [4, 5, 6, 7]]), and placements ([Shard(1), Shard(0)]), the num_shards_map of this distributed tensor would be: [4, 2, 1].", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py", + "ast_data": "FunctionDef name:num_shards_map arg:self arguments arg Assign For Call If Call Assign Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "get_config", + "source_code": "def get_config(self, section: str, name: str, default: Any=_NO_DEFAULT) -> Any:\n if section == 'theme':\n if name == 'stylesheet':\n value = ', '.join(self.stylesheets) or default\n elif name == 'sidebars':\n value = ', '.join(self.sidebar_templates) or default\n elif name == 'pygments_style':\n value = self.pygments_style_default or default\n elif name == 'pygments_dark_style':\n value = self.pygments_style_dark or default\n else:\n value = default\n elif section == 'options':\n value = self._options.get(name, default)\n else:\n msg = __('Theme configuration sections other than [theme] and [options] are not supported (tried to get a value from %r).')\n raise ThemeError(msg)\n if value is _NO_DEFAULT:\n msg = __('setting %s.%s occurs in none of the searched theme configs') % (section, name)\n raise ThemeError(msg)\n return value", + "docstring": "Return the value for a theme configuration setting, searching the base theme chain.", + "type": "method", + "file_path": "sphinx\\sphinx\\theming.py", + "ast_data": "FunctionDef name:get_config arg:self arg:section arg:name arg:default arguments arg arg arg arg If Compare If Compare Assign BoolOp Call If Compare Assign BoolOp Call If Compare Assign BoolOp If Compare Assign BoolOp Assign If Compare Assign Call Assign Call Raise Call If Compare Assign Call Raise Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_initialize_dict", + "source_code": "def _initialize_dict(self, X, random_state):\n if self.dict_init is not None:\n dictionary = self.dict_init\n else:\n _, S, dictionary = _randomized_svd(X, self._n_components, random_state=random_state)\n dictionary = S[:, np.newaxis] * dictionary\n if self._n_components <= len(dictionary):\n dictionary = dictionary[:self._n_components, :]\n else:\n dictionary = np.concatenate((dictionary, np.zeros((self._n_components - len(dictionary), dictionary.shape[1]), dtype=dictionary.dtype)))\n dictionary = check_array(dictionary, order='F', dtype=X.dtype, copy=False)\n dictionary = np.require(dictionary, requirements='W')\n return dictionary", + "docstring": "Initialization of the dictionary.", + "type": 
"method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", + "ast_data": "FunctionDef name:_initialize_dict arg:self arg:X arg:random_state arguments arg arg arg If Compare Assign Assign Call Assign If Compare Call Assign Assign Call Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "remove_from_list", + "source_code": "def remove_from_list(self, name: _SettingsKeyT, item: Any) -> None:\n value: list[str] = self.getlist(name)\n if item not in value:\n raise ValueError(f'{item!r} not found in the {name} setting ({value!r}).')\n self.set(name, [v for v in value if v != item], self.getpriority(name) or 0)", + "docstring": "Remove *item* from the :class: setting with the specified *name*. If *item* is missing, raise :exc:. This change is applied regardless of the priority of the *name* setting. The setting priority is not affected by this change either.", + "type": "method", + "file_path": "scrapy\\scrapy\\settings\\__init__.py", + "ast_data": "FunctionDef name:remove_from_list arg:self arg:name arg:item arguments arg arg arg Call If Compare Raise Call Call Compare BoolOp Call" + }, + { + "library": "matplotlib", + "name": "_revert", + "source_code": "def _revert(self, path, first_action=Path.LINETO):\n reverse_path = []\n next_code = first_action\n for code, position in path[::-1]:\n reverse_path.append((next_code, position))\n next_code = code\n return reverse_path", + "docstring": "A path is not simply reversible by path[::-1] since the code specifies an action to take from the **previous** point.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\sankey.py", + "ast_data": "FunctionDef name:_revert arg:self arg:path arg:first_action arguments arg arg arg Assign Assign For Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "__eq__", + "source_code": "def __eq__(self, other):\n return isinstance(other, self.__class__) and self.to_string() == other.to_string()", + "docstring": "Checks if the DeviceSpec is same as the current instance, eg have same value for all the internal fields. Args: other: Another DeviceSpec Returns: Return if is also a DeviceSpec instance and has same value as the current instance. 
Return otherwise.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py", + "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare Call Call" + }, + { + "library": "tensorflow", + "name": "parse_example_spec", + "source_code": "@property\ndef parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(self.dtype)}", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "stop", + "source_code": "def stop(self) -> None:\n log.info('Stopping noop health check server.')", + "docstring": "Function to stop health check server", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\health_check_server.py", + "ast_data": "FunctionDef name:stop arg:self arguments arg Call" + }, + { + "library": "scikit-learn", + "name": "_fit_calibrator", + "source_code": "def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None):\n Y = label_binarize(y, classes=classes)\n label_encoder = LabelEncoder().fit(classes)\n pos_class_indices = label_encoder.transform(clf.classes_)\n calibrators = []\n for class_idx, this_pred in zip(pos_class_indices, predictions.T):\n if method == 'isotonic':\n calibrator = IsotonicRegression(out_of_bounds='clip')\n else:\n calibrator = _SigmoidCalibration()\n calibrator.fit(this_pred, Y[:, class_idx], sample_weight)\n calibrators.append(calibrator)\n pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes)\n return pipeline", + "docstring": "Fit calibrator(s) and return a instance. (i.e. ) calibrators are fitted. However, if equals 2, one calibrator is fitted. Parameters ---------- clf : estimator instance Fitted classifier. predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) when binary. Raw predictions returned by the un-calibrated base classifier. y : array-like, shape (n_samples,) The targets. classes : ndarray, shape (n_classes,) All the prediction classes. method : {'sigmoid', 'isotonic'} The method to use for calibration. sample_weight : ndarray, shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. 
Returns ------- pipeline : _CalibratedClassifier instance", + "type": "function", + "file_path": "scikit-learn\\sklearn\\calibration.py", + "ast_data": "FunctionDef name:_fit_calibrator arg:clf arg:predictions arg:y arg:classes arg:method arg:sample_weight arguments arg arg arg arg arg arg Assign Call Assign Call Call Assign Call Assign For Call If Compare Assign Call Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "DlpackDeviceType", + "source_code": "class DlpackDeviceType(enum.IntEnum):\n CPU = 1\n CUDA = 2\n CPU_PINNED = 3\n OPENCL = 4\n VULKAN = 7\n METAL = 8\n VPI = 9\n ROCM = 10", + "docstring": "Integer enum for device type codes matching DLPack.", + "type": "class", + "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py", + "ast_data": "ClassDef name:DlpackDeviceType Assign Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "sphinx", + "name": "get_matching_files", + "source_code": "def get_matching_files(dirname: str | os.PathLike[str], include_patterns: Iterable[str]=('**',), exclude_patterns: Iterable[str]=()) -> Iterator[str]:\n dirname = Path(dirname).resolve()\n exclude_matchers = compile_matchers(exclude_patterns)\n include_matchers = compile_matchers(include_patterns)\n for root, dirs, files in os.walk(dirname, followlinks=True):\n relative_root = os.path.relpath(root, dirname)\n if relative_root == '.':\n relative_root = ''\n relative_root_path = Path(relative_root)\n included_files = []\n for entry in sorted(files):\n entry = _unicode_nfc((relative_root_path / entry).as_posix())\n keep = False\n for matcher in include_matchers:\n if matcher(entry):\n keep = True\n break\n for matcher in exclude_matchers:\n if matcher(entry):\n keep = False\n break\n if keep:\n included_files.append(entry)\n filtered_dirs = []\n for dir_name in sorted(dirs):\n normalised = _unicode_nfc((relative_root_path / dir_name).as_posix())\n for matcher in exclude_matchers:\n if matcher(normalised):\n break\n else:\n filtered_dirs.append(dir_name)\n dirs[:] = filtered_dirs\n yield from included_files", + "docstring": "Get all file names in a directory, recursively. Filter file names by the glob-style include_patterns and exclude_patterns. The default values include all files (\"**\") and exclude nothing (\"\"). Only files matching some pattern in *include_patterns* are included, and exclusions from *exclude_patterns* take priority over inclusions.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\matching.py", + "ast_data": "FunctionDef name:get_matching_files arg:dirname arg:include_patterns arg:exclude_patterns arguments arg arg arg Assign Call Call Assign Call Assign Call For Call Assign Call If Compare Assign Assign Call Assign For Call Assign Call Call Assign For If Call Assign For If Call Assign If Call Assign For Call Assign Call Call For If Call Call Assign" + }, + { + "library": "pytorch", + "name": "get_float32_matmul_precision", + "source_code": "def get_float32_matmul_precision() -> str:\n return _C._get_float32_matmul_precision()", + "docstring": "Returns the current value of float32 matrix multiplication precision. 
Refer to :func: documentation for more details.", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:get_float32_matmul_precision arguments Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_calc_impute", + "source_code": "def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):\n donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[:, :n_neighbors]\n donors_dist = dist_pot_donors[np.arange(donors_idx.shape[0])[:, None], donors_idx]\n weight_matrix = _get_weights(donors_dist, self.weights)\n if weight_matrix is not None:\n weight_matrix[np.isnan(weight_matrix)] = 0.0\n else:\n weight_matrix = np.ones_like(donors_dist)\n weight_matrix[np.isnan(donors_dist)] = 0.0\n donors = fit_X_col.take(donors_idx)\n donors_mask = mask_fit_X_col.take(donors_idx)\n donors = np.ma.array(donors, mask=donors_mask)\n return np.ma.average(donors, axis=1, weights=weight_matrix).data", + "docstring": "Helper function to impute a single column. Parameters ---------- dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors) Distance matrix between the receivers and potential donors from training set. There must be at least one non-nan distance between a receiver and a potential donor. n_neighbors : int Number of neighbors to consider. fit_X_col : ndarray of shape (n_potential_donors,) Column of potential donors from training set. mask_fit_X_col : ndarray of shape (n_potential_donors,) Missing mask for fit_X_col. Returns ------- imputed_values: ndarray of shape (n_receivers,) Imputed values for receiver.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\impute\\_knn.py", + "ast_data": "FunctionDef name:_calc_impute arg:self arg:dist_pot_donors arg:n_neighbors arg:fit_X_col arg:mask_fit_X_col arguments arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "register_backend", + "source_code": "def register_backend(backend):\n backend = _backend_from_arg(backend)\n ua.register_backend(backend)", + "docstring": "Register a backend for permanent use. Registered backends have the lowest priority and will be tried after the global backend. Parameters ---------- backend : {object, 'scipy'} The backend to use. 
Can either be a `NotImplemented` >>> fft([1]) array([1.+0.j]) >>> set_global_backend(\"scipy\") # Restore global backend to default", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_backend.py", + "ast_data": "FunctionDef name:register_backend arg:backend arguments arg Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_subscribe", + "source_code": "def _subscribe(tensor, side_effects, control_cache):\n if not tensor.dtype.is_numpy_compatible:\n logging.debug('Tensor {} has an un-supported {} type and cannot be subscribed.'.format(tensor.name, tensor.dtype))\n return tensor\n if _is_subscribed_identity(tensor):\n return _subscribe_extend(tensor, side_effects)\n name_scope = tensor.op.name + '/subscription/Identity'\n consumers = tensor.consumers()\n matching_ops = [op for op in consumers if op.name.startswith(name_scope)]\n assert len(matching_ops) <= 1, 'Op {} must only have one subscription op connected to it'.format(tensor.op.name)\n if len(matching_ops) == 1:\n candidate_tensor = matching_ops[0].outputs[0]\n if _is_subscribed_identity(candidate_tensor):\n return _subscribe_extend(candidate_tensor, side_effects)\n return _subscribe_new(tensor, side_effects, control_cache)", + "docstring": "Helper method that subscribes a single tensor to a list of side_effects. This method will check if the given tensor has already been subscribed or if it's a tensor returned by a previous call to and, if so, will reuse the existing identity op, appending the given side effects to the list of existing ones. Args: tensor: The to be subscribed. side_effects: List of side_effect functions, see subscribe for details. control_cache: helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it was already been subscribed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py", + "ast_data": "FunctionDef name:_subscribe arg:tensor arg:side_effects arg:control_cache arguments arg arg arg If Call Call Return return:yes If Call Return return:yes Call Assign Assign Call Assign Call Compare Call Call If Compare Call Assign If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "register_watched_variable_resolver", + "source_code": "def register_watched_variable_resolver(resolver):\n global _variables_override\n assert _variables_override is default_get_variables\n _variables_override = resolver", + "docstring": "Registers the resolver to be used to get the list of variables to watch. Args: resolver: callable, takes a Variable and returns a list of Variables that shall be watched.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py", + "ast_data": "FunctionDef name:register_watched_variable_resolver arg:resolver arguments arg Compare Assign" + }, + { + "library": "kornia", + "name": "laf_is_inside_image", + "source_code": "def laf_is_inside_image(laf: Tensor, images: Tensor, border: int=0) -> Tensor:\n KORNIA_CHECK_LAF(laf)\n _, _, h, w = images.size()\n pts = laf_to_boundary_points(laf, 12)\n good_lafs_mask = (pts[..., 0] >= border) * (pts[..., 0] <= w - border) * (pts[..., 1] >= border) * (pts[..., 1] <= h - border)\n good_lafs_mask = good_lafs_mask.min(dim=2)[0]\n return good_lafs_mask", + "docstring": "Check if the LAF is touching or partly outside the image boundary. Returns the mask of LAFs, which are fully inside the image, i.e. valid. Args: laf: :math:. 
images: images, lafs are detected in :math:. border: additional border. Returns: mask with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\laf.py", + "ast_data": "FunctionDef name:laf_is_inside_image arg:laf arg:images arg:border arguments arg arg arg Call Assign Call Assign Call Assign Compare Compare Compare Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "init_state", + "source_code": "def init_state(self) -> None:\n for param in self.named_parameters.values():\n if param.requires_grad:\n t = torch.zeros_like(param)\n param.grad = torch.autograd.Variable(t)\n self.step(closure=None)", + "docstring": "Run a dummy optimizer step, which allows to initialize optimizer state because we do lazy init for most optimizers. This allows doing in-place loading of optimizer state from a checkpoint.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\named_optimizer.py", + "ast_data": "FunctionDef name:init_state arg:self arguments arg For Call If Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "supports_masking", + "source_code": "@property\ndef supports_masking(self):\n return self._supports_masking", + "docstring": "Whether this layer supports computing a mask using .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:supports_masking arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get", + "source_code": "def _get(self):\n if values_util.is_saving_non_distributed():\n return self._primary\n replica_id = values_util.get_current_replica_id_as_int()\n if replica_id is None:\n return self._get_cross_replica()\n else:\n return self._get_replica(replica_id)", + "docstring": "Returns the value for the current device or raises a ValueError.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:_get arg:self arguments arg If Call Return return:yes Assign Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "Mishra07", + "source_code": "class Mishra07(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.custom_bounds = [(-2, 2), (-2, 2)]\n self.global_optimum = [[sqrt(self.N) for i in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return (prod(x) - factorial(self.N)) ** 2.0", + "docstring": "Mishra 7 objective function. This class defines the Mishra 7 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra07}}(x) = \\left [\\prod_{i=1}^{n} x_i - n! \\right]^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py", + "ast_data": "ClassDef name:Mishra07 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "close_if_unusable_or_obsolete", + "source_code": "def close_if_unusable_or_obsolete(self):\n if self.connection is not None:\n self.health_check_done = False\n if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:\n self.close()\n return\n if self.errors_occurred:\n if self.is_usable():\n self.errors_occurred = False\n self.health_check_done = True\n else:\n self.close()\n return\n if self.close_at is not None and time.monotonic() >= self.close_at:\n self.close()\n return", + "docstring": "Close the current connection if unrecoverable errors have occurred or if it outlived its maximum age.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:close_if_unusable_or_obsolete arg:self arguments arg If Compare Assign If Compare Call Call Return return:no If If Call Assign Assign Call Return return:no If BoolOp Compare Compare Call Call Return return:no" + }, + { + "library": "tensorflow", + "name": "_get_tril", + "source_code": "def _get_tril(self):\n return array_ops.matrix_band_part(self._tril, -1, 0)", + "docstring": "Gets the kwarg, with upper part zero-d out.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_lower_triangular.py", + "ast_data": "FunctionDef name:_get_tril arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "huber_loss", + "source_code": "def huber_loss(input: Tensor, target: Tensor, reduction: str='mean', delta: float=1.0, weight: Optional[Tensor]=None) -> Tensor:\n if has_torch_function_variadic(input, target, weight):\n return handle_torch_function(huber_loss, (input, target, weight), input, target, reduction=reduction, delta=delta, weight=weight)\n if not target.size() == input.size():\n warnings.warn(f'Using a target size ({target.size()}) that is different to the input size ({input.size()}). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.', stacklevel=2)\n expanded_input, expanded_target = torch.broadcast_tensors(input, target)\n if weight is None:\n return torch._C._nn.huber_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), delta)\n else:\n if weight.size() != input.size():\n raise ValueError('Weights and input must have the same size.')\n unweighted_loss = torch._C._nn.huber_loss(expanded_input, expanded_target, _Reduction.get_enum('none'), delta)\n weighted_loss = unweighted_loss * weight\n if reduction == 'none':\n return weighted_loss\n elif reduction == 'sum':\n return torch.sum(weighted_loss)\n elif reduction == 'mean':\n return weighted_loss.mean()\n else:\n raise ValueError(f\"Invalid reduction mode: {reduction}. Expected one of 'none', 'mean', 'sum'.\")", + "docstring": "Compute the Huber loss, with optional weighting. Function uses a squared term if the absolute element-wise error falls below delta and a delta-scaled L1 term otherwise. When delta equals 1, this loss is equivalent to SmoothL1Loss. 
In general, Huber loss differs from SmoothL1Loss by a factor of delta (AKA beta in Smooth L1). See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. delta (float, optional): The threshold at which to change between delta-scaled L1 and L2 loss. Default: 1.0. weight (Tensor, optional): Weights for each sample. Default: None. Returns: Tensor: Huber loss (optionally weighted).", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:huber_loss arg:input arg:target arg:reduction arg:delta arg:weight arguments arg arg arg arg arg If Call Return return:yes Call If Compare Call Call Call Call Call Assign Call If Compare Return return:yes Call Call If Compare Call Call Raise Call Assign Call Call Assign If Compare Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "_restore_state_dict", + "source_code": "def _restore_state_dict(original_module: torch.nn.Module, traced_module: torch.fx.GraphModule) -> None:\n param_buffer_table = _get_param_buffer_mapping(original_module, traced_module)\n for name, fqn in param_buffer_table.items():\n param_buffer_table[name] = fqn.replace('.', '_')\n for name, fqn in param_buffer_table.items():\n if not hasattr(traced_module, name):\n continue\n attr = getattr(traced_module, name)\n if isinstance(attr, torch.Tensor) and (not isinstance(attr, torch.nn.Parameter)):\n traced_module.register_buffer(fqn, attr)\n else:\n setattr(traced_module, fqn, attr)\n delattr(traced_module, name)\n for node in traced_module.graph.nodes:\n if node.op == 'get_attr':\n attr_name = node.target\n if attr_name in param_buffer_table:\n node.target = param_buffer_table[attr_name]\n traced_module.recompile()", + "docstring": "Restores the state dict of the traced module to that of the original module.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_restore_state_dict arg:original_module arg:traced_module arguments arg arg Assign Call For Call Assign Call For Call If Call Assign Call If BoolOp Call Call Call Call Call For If Compare Assign If Compare Assign Call" + }, + { + "library": "pytorch", + "name": "determine_grid", + "source_code": "def determine_grid(grid: TritonGrid, example_grid: Optional[TritonGrid]=None):\n if wrapper is None or callable(grid):\n return (grid, grid)\n sympy_grid = tuple((_convert_to_sympy_expr(g) for g in grid))\n if not example_grid:\n example_grid = sympy_grid\n return (wrapper.codegen_python_shape_tuple(sympy_grid), wrapper.codegen_python_shape_tuple(tuple((wrapper.generate_example_arg_value(g, type(g)) for g in example_grid))) if config.triton.autotune_at_compile_time else None)", + "docstring": "This function return a tuple of two values: the first one is for the real grid which is used in the generated code; the second one is an example grid with concreate values which is used in the autotune block to run the generated kernels at compile time.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py", + "ast_data": "FunctionDef name:determine_grid arg:grid arg:example_grid arguments arg arg If BoolOp Compare Call Return return:yes Assign Call Call If 
Assign Return return:yes Call Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "_initialize", + "source_code": "def _initialize(self):\n self._cpp_trifinder.initialize()", + "docstring": "Initialize the underlying C++ object. Can be called multiple times if, for example, the triangulation is modified.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trifinder.py", + "ast_data": "FunctionDef name:_initialize arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "_generate_signatures", + "source_code": "def _generate_signatures(signature_functions: dict[str, Callable[..., Any]], object_map: object_identity.ObjectIdentityDictionary, defaults=None):\n signatures = {}\n for signature_key, function in sorted(signature_functions.items()):\n if function.graph.captures:\n argument_inputs = function.graph.inputs[:-len(function.graph.captures)]\n else:\n argument_inputs = function.graph.inputs\n mapped_inputs, exterior_argument_placeholders = _map_function_arguments_to_created_inputs(argument_inputs, signature_key, function.name, defaults)\n kwarg_names = list(sorted(object_map[function].function.structured_input_signature[1].keys()))\n outputs = object_map[function](**{kwarg_name: mapped_input for kwarg_name, mapped_input in zip(kwarg_names, mapped_inputs)})\n signatures[signature_key] = signature_def_utils.build_signature_def(_tensor_dict_to_tensorinfo(exterior_argument_placeholders), _tensor_dict_to_tensorinfo(outputs), method_name=signature_constants.PREDICT_METHOD_NAME, defaults=defaults.get(signature_key, None))\n return signatures", + "docstring": "Validates and calls in the exported graph. Args: signature_functions: A dictionary mapping string keys to concrete TensorFlow functions (e.g. from ) which will be used to generate SignatureDefs. object_map: A dictionary that contains mappings from signature functions to concrete functions in the exported graph. defaults: A dictionary mapping signature_key to dictionary of user_specified_name to Tensor representing default values. Returns: Each function in the dictionary is called with placeholder Tensors, generating a function call operation and output Tensors. The placeholder Tensors, the function call operation, and the output Tensors from the function call are part of the default Graph. This function then returns a dictionary with the same structure as , with the concrete functions replaced by SignatureDefs implicitly containing information about how to call each function from a TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference the generated placeholders and Tensor outputs by name. 
The caller is expected to include the default Graph set while calling this function as a MetaGraph in a SavedModel, including the returned SignatureDefs as part of that MetaGraph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:_generate_signatures arg:signature_functions arg:object_map arg:defaults arguments arg arg arg Assign For Call Call If Assign Call Assign Assign Call Assign Call Call Call Assign Call Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "size", + "source_code": "def size(self, name=None):\n del name\n return constant_op.constant(len(self._tensor_array))", + "docstring": "See TensorArray.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "share_memory_", + "source_code": "def share_memory_(self):\n if has_torch_function_unary(self):\n return handle_torch_function(Tensor.share_memory_, (self,), self)\n self._typed_storage()._share_memory_()\n return self", + "docstring": "Moves the underlying storage to shared memory. This is a no-op if the underlying storage is already in shared memory and for CUDA tensors. Tensors in shared memory cannot be resized. See :meth: for more details.", + "type": "method", + "file_path": "pytorch\\torch\\_tensor.py", + "ast_data": "FunctionDef name:share_memory_ arg:self arguments arg If Call Return return:yes Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "file_path", + "source_code": "@property\ndef file_path(self):\n return self._file_path", + "docstring": "Path to the file which stores the value of the dumped tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:file_path arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_extract_tensors", + "source_code": "def _extract_tensors(obj):\n tensors: list[torch.Tensor] = []\n extractor = _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors)\n extractor.dump(obj)\n return tensors", + "docstring": "This function is exclusively called from C++. See ``. 
It extracts the tensors contained in the given object, through pickling.", + "type": "function", + "file_path": "pytorch\\torch\\_jit_internal.py", + "ast_data": "FunctionDef name:_extract_tensors arg:obj arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "convert_temperature", + "source_code": "@xp_capabilities()\ndef convert_temperature(val: 'npt.ArrayLike', old_scale: str, new_scale: str) -> Any:\n xp = array_namespace(val)\n _val = _asarray(val, xp=xp, subok=True)\n if old_scale.lower() in ['celsius', 'c']:\n tempo = _val + zero_Celsius\n elif old_scale.lower() in ['kelvin', 'k']:\n tempo = _val\n elif old_scale.lower() in ['fahrenheit', 'f']:\n tempo = (_val - 32) * 5 / 9 + zero_Celsius\n elif old_scale.lower() in ['rankine', 'r']:\n tempo = _val * 5 / 9\n else:\n raise NotImplementedError(f'old_scale={old_scale!r} is unsupported: supported scales are Celsius, Kelvin, Fahrenheit, and Rankine')\n if new_scale.lower() in ['celsius', 'c']:\n res = tempo - zero_Celsius\n elif new_scale.lower() in ['kelvin', 'k']:\n res = tempo\n elif new_scale.lower() in ['fahrenheit', 'f']:\n res = (tempo - zero_Celsius) * 9 / 5 + 32\n elif new_scale.lower() in ['rankine', 'r']:\n res = tempo * 9 / 5\n else:\n raise NotImplementedError(f\"new_scale={new_scale!r} is unsupported: supported scales are 'Celsius', 'Kelvin', 'Fahrenheit', and 'Rankine'\")\n return res", + "docstring": "Convert from a temperature scale to another one among Celsius, Kelvin, Fahrenheit, and Rankine scales. Parameters ---------- val : array_like Value(s) of the temperature(s) to be converted expressed in the original scale. old_scale : str Specifies as a string the original scale from which the temperature value(s) will be converted. Supported scales are Celsius ('Celsius', 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine ('Rankine', 'rankine', 'R', 'r'). new_scale : str Specifies as a string the new scale to which the temperature value(s) will be converted. Supported scales are Celsius ('Celsius', 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine ('Rankine', 'rankine', 'R', 'r'). Returns ------- res : float or array of floats Value(s) of the converted temperature(s) expressed in the new scale. Notes ----- .. 
versionadded:: 0.18.0 Examples -------- >>> from scipy.constants import convert_temperature >>> import numpy as np >>> convert_temperature(np.array([-40, 40]), 'Celsius', 'Kelvin') array([ 233.15, 313.15])", + "type": "function", + "file_path": "scipy\\scipy\\constants\\_constants.py", + "ast_data": "FunctionDef name:convert_temperature arg:val arg:old_scale arg:new_scale arguments arg arg arg Assign Call Assign Call If Compare Call Assign If Compare Call Assign If Compare Call Assign If Compare Call Assign Raise Call If Compare Call Assign If Compare Call Assign If Compare Call Assign If Compare Call Assign Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, node_def, op, message, *args):\n super(InternalError, self).__init__(node_def, op, message, INTERNAL, *args)", + "docstring": "Creates an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call" + }, + { + "library": "cryptography", + "name": "parameter_bytes", + "source_code": "@abc.abstractmethod\ndef parameter_bytes(self, encoding: _serialization.Encoding, format: _serialization.ParameterFormat) -> bytes:\n pass", + "docstring": "Returns the parameters serialized as bytes.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py", + "ast_data": "FunctionDef name:parameter_bytes arg:self arg:encoding arg:format arguments arg arg arg" + }, + { + "library": "django", + "name": "_get_session_key", + "source_code": "def _get_session_key(self):\n return signing.dumps(self._session, compress=True, salt='django.contrib.sessions.backends.signed_cookies', serializer=self.serializer)", + "docstring": "Instead of generating a random string, generate a secure url-safe base64-encoded string of data as our session key.", + "type": "method", + "file_path": "django\\django\\contrib\\sessions\\backends\\signed_cookies.py", + "ast_data": "FunctionDef name:_get_session_key arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "OutputSpec", + "source_code": "class OutputSpec:\n\n def get_device(self) -> Optional[torch.device]:\n raise NotImplementedError(type(self).__name__)\n\n def storage_size(self) -> int:\n raise NotImplementedError(type(self).__name__)", + "docstring": "Abstract base for Layout, MultiOutputLayout, NoneLayout. 
Represents the memory layout of the output of an Operation.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "ClassDef name:OutputSpec FunctionDef name:get_device arg:self arguments arg Raise Call Call FunctionDef name:storage_size arg:self arguments arg Raise Call Call" + }, + { + "library": "matplotlib", + "name": "get_text_bounds", + "source_code": "def get_text_bounds(self, renderer):\n return self._text.get_window_extent(renderer).transformed(self.get_data_transform().inverted()).bounds", + "docstring": "Return the text bounds as *(x, y, width, height)* in table coordinates.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:get_text_bounds arg:self arg:renderer arguments arg arg Return return:yes Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_solve_eigen", + "source_code": "def _solve_eigen(self, X, y, shrinkage, covariance_estimator):\n self.means_ = _class_means(X, y)\n self.covariance_ = _class_cov(X, y, self.priors_, shrinkage, covariance_estimator)\n Sw = self.covariance_\n St = _cov(X, shrinkage, covariance_estimator)\n Sb = St - Sw\n evals, evecs = linalg.eigh(Sb, Sw)\n self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][:self._max_components]\n evecs = evecs[:, np.argsort(evals)[::-1]]\n self.scalings_ = evecs\n self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)\n self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(self.priors_)", + "docstring": "Eigenvalue solver. The eigenvalue solver computes the optimal solution of the Rayleigh coefficient (basically the ratio of between class scatter to within class scatter). This solver supports both classification and dimensionality reduction (with any covariance estimator). Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. shrinkage : 'auto', float or None Shrinkage parameter, possible values: - None: no shrinkage. - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage constant. Shrinkage parameter is ignored if i not None covariance_estimator : estimator, default=None If not None, is used to estimate the covariance matrices instead of relying the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a `` attribute like the estimators in sklearn.covariance. if None the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Notes ----- This solver is based on [1]_, section 3.8.3, pp. 121-124. References ---------- .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification (Second Edition). John Wiley & Sons, Inc., New York, 2001. 
ISBN 0-471-05669-3.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py", + "ast_data": "FunctionDef name:_solve_eigen arg:self arg:X arg:y arg:shrinkage arg:covariance_estimator arguments arg arg arg arg arg Assign Call Assign Call Assign Assign Call Assign Assign Call Assign Call Call Assign Call Assign Assign Call Call Assign Call Call Call" + }, + { + "library": "django", + "name": "BaseDatabaseValidation", + "source_code": "class BaseDatabaseValidation:\n\n def __init__(self, connection):\n self.connection = connection\n\n def __del__(self):\n del self.connection\n\n def check(self, **kwargs):\n return []\n\n def check_field(self, field, **kwargs):\n errors = []\n if hasattr(self, 'check_field_type') and (not getattr(field, 'remote_field', None)):\n db_supports_all_required_features = all((getattr(self.connection.features, feature, False) for feature in field.model._meta.required_db_features))\n if db_supports_all_required_features:\n field_type = field.db_type(self.connection)\n if field_type is not None:\n errors.extend(self.check_field_type(field, field_type))\n return errors", + "docstring": "Encapsulate backend-specific validation.", + "type": "class", + "file_path": "django\\django\\db\\backends\\base\\validation.py", + "ast_data": "ClassDef name:BaseDatabaseValidation FunctionDef name:__init__ arg:self arg:connection arguments arg arg Assign FunctionDef name:__del__ arg:self arguments arg FunctionDef name:check arg:self arguments arg arg Return return:no FunctionDef name:check_field arg:self arg:field arguments arg arg arg Assign If BoolOp Call Call Assign Call Call If Assign Call If Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "Sigmoid", + "source_code": "class Sigmoid(Module):\n\n def forward(self, input: Tensor) -> Tensor:\n return torch.sigmoid(input)", + "docstring": "Applies the Sigmoid function element-wise. .. math:: \\text{Sigmoid}(x) = \\sigma(x) = \\frac{1}{1 + \\exp(-x)} Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/Sigmoid.png Examples:: >>> m = nn.Sigmoid() >>> input = torch.randn(2) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:Sigmoid FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_cast_grad_to_param_dtype", + "source_code": "@no_type_check\ndef _cast_grad_to_param_dtype(state: _FSDPState, sharded_grad: torch.Tensor, param: FlatParameter):\n _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD])\n if not _low_precision_hook_enabled(state) and sharded_grad.dtype != param.dtype:\n low_prec_grad_data = sharded_grad.data\n sharded_grad.data = sharded_grad.data.to(dtype=param.dtype)\n _no_dispatch_record_stream(low_prec_grad_data, state._device_handle.current_stream())", + "docstring": "Casts `` back to the full parameter dtype so that the optimizer step runs with that dtype. This performs an actual cast if 1. parameters were in reduced precision during the forward since then gradients would be in that reduced precision, or 2. parameters were not in reduced precision but gradients were in reduced precision for communication. 
However, if a low precision communication hook is registered, then this dtype cast happens in the hook instead.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_cast_grad_to_param_dtype arg:state arg:sharded_grad arg:param arguments arg arg arg Call If BoolOp Call Compare Assign Assign Call Call Call" + }, + { + "library": "kornia", + "name": "_range_bound", + "source_code": "def _range_bound(factor: Union[Tensor, float, Tuple[float, float], List[float]], name: str, center: Optional[float]=0.0, bounds: Optional[Tuple[float, float]]=(0, float('inf')), check: Optional[str]='joint', device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n if device is None:\n device = torch.device('cpu')\n if dtype is None:\n dtype = torch.get_default_dtype()\n if not isinstance(factor, Tensor):\n factor = tensor(factor, device=device, dtype=dtype)\n factor_bound: Tensor\n if factor.dim() == 0:\n if factor < 0:\n raise ValueError(f'If {name} is a single number, it must be non negative. Got {factor}.')\n if center is None or bounds is None:\n raise ValueError(f'`center` and `bounds` cannot be None for single number. Got {center}, {bounds}.')\n factor_bound = factor.repeat(2) * tensor([-1.0, 1.0], device=factor.device, dtype=factor.dtype) + center\n factor_bound = factor_bound.clamp(bounds[0], bounds[1]).to(device=device, dtype=dtype)\n else:\n factor_bound = as_tensor(factor, device=device, dtype=dtype)\n if check is not None:\n if check == 'joint':\n _joint_range_check(factor_bound, name, bounds)\n elif check == 'singular':\n _singular_range_check(factor_bound, name, bounds)\n else:\n raise NotImplementedError(f\"methods '{check}' not implemented.\")\n return factor_bound", + "docstring": "Check inputs and compute the corresponding factor bounds.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\utils\\param_validation.py", + "ast_data": "FunctionDef name:_range_bound arg:factor arg:name arg:center arg:bounds arg:check arg:device arg:dtype arguments arg arg arg arg arg arg arg Call If Compare Assign Call If Compare Assign Call If Call Assign Call If Compare Call If Compare Raise Call If BoolOp Compare Compare Raise Call Assign Call Call Assign Call Call Assign Call If Compare If Compare Call If Compare Call Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "float_factorial", + "source_code": "def float_factorial(n: int) -> float:\n return float(math.factorial(n)) if n < 171 else np.inf", + "docstring": "Compute the factorial and return as a float Returns infinity when result is too large for a double", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_util.py", + "ast_data": "FunctionDef name:float_factorial arg:n arguments arg Return return:yes Compare Call Call" + }, + { + "library": "matplotlib", + "name": "_CollectionWithSizes", + "source_code": "class _CollectionWithSizes(Collection):\n _factor = 1.0\n\n def get_sizes(self):\n return self._sizes\n\n def set_sizes(self, sizes, dpi=72.0):\n if sizes is None:\n self._sizes = np.array([])\n self._transforms = np.empty((0, 3, 3))\n else:\n self._sizes = np.asarray(sizes)\n self._transforms = np.zeros((len(self._sizes), 3, 3))\n scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor\n self._transforms[:, 0, 0] = scale\n self._transforms[:, 1, 1] = scale\n self._transforms[:, 2, 2] = 1.0\n self.stale = True\n\n @artist.allow_rasterization\n def draw(self, renderer):\n self.set_sizes(self._sizes, 
self.get_figure(root=True).dpi)\n super().draw(renderer)", + "docstring": "Base class for collections that have an array of sizes.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "ClassDef name:_CollectionWithSizes Assign FunctionDef name:get_sizes arg:self arguments arg Return return:yes FunctionDef name:set_sizes arg:self arg:sizes arg:dpi arguments arg arg arg If Compare Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Assign Assign Assign FunctionDef name:draw arg:self arg:renderer arguments arg arg Call Call Call Call" + }, + { + "library": "django", + "name": "to_python", + "source_code": "def to_python(self, value):\n if isinstance(value, str) and value.lower() in ('false', '0'):\n value = False\n else:\n value = bool(value)\n return super().to_python(value)", + "docstring": "Return a Python boolean object.", + "type": "method", + "file_path": "django\\django\\forms\\fields.py", + "ast_data": "FunctionDef name:to_python arg:self arg:value arguments arg arg If BoolOp Call Compare Call Assign Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "validate_version", + "source_code": "def validate_version(self, where=None) -> None:\n if where is not None:\n if self.is_old_version:\n ws = incompatibility_doc % '.'.join([str(x) for x in self.version])\n warnings.warn(ws, IncompatibilityWarning, stacklevel=find_stack_level())", + "docstring": "are we trying to operate on an old version?", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:validate_version arg:self arg:where arguments arg arg If Compare If Assign Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_deprecate_positional_args", + "source_code": "def _deprecate_positional_args(func=None, *, version='1.3'):\n\n def _inner_deprecate_positional_args(f):\n sig = signature(f)\n kwonly_args = []\n all_args = []\n for name, param in sig.parameters.items():\n if param.kind == Parameter.POSITIONAL_OR_KEYWORD:\n all_args.append(name)\n elif param.kind == Parameter.KEYWORD_ONLY:\n kwonly_args.append(name)\n\n @wraps(f)\n def inner_f(*args, **kwargs):\n extra_args = len(args) - len(all_args)\n if extra_args <= 0:\n return f(*args, **kwargs)\n args_msg = ['{}={}'.format(name, arg) for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])]\n args_msg = ', '.join(args_msg)\n warnings.warn(f'Pass {args_msg} as keyword args. From version {version} passing these as positional arguments will result in an error', FutureWarning)\n kwargs.update(zip(sig.parameters, args))\n return f(**kwargs)\n return inner_f\n if func is not None:\n return _inner_deprecate_positional_args(func)\n return _inner_deprecate_positional_args", + "docstring": "Decorator for methods that issues warnings for positional arguments. Using the keyword-only argument syntax in pep 3102, arguments after the * will issue a warning when passed as a positional argument. Parameters ---------- func : callable, default=None Function to check arguments on. 
version : callable, default=\"1.3\" The version when positional arguments will result in error.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\validation.py", + "ast_data": "FunctionDef name:_deprecate_positional_args arg:func arguments arg arg FunctionDef name:_inner_deprecate_positional_args arg:f arguments arg Assign Call Assign Assign For Call If Compare Call If Compare Call FunctionDef name:inner_f arguments arg arg Assign Call Call If Compare Return return:yes Call Assign Call Call Assign Call Call Call Call Return return:yes Call Call Return return:yes If Compare Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_check_reducer_finalized", + "source_code": "def _check_reducer_finalized(self):\n self.reducer._check_reducer_finalized()", + "docstring": "Check if the reducer has processed all buckets and finalized the backward appropriately. It is useful to call this method after calling .backward() in your training loop in order to avoid subsequent hard to debug errors down the road due to the reducer not finalizing backward.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py", + "ast_data": "FunctionDef name:_check_reducer_finalized arg:self arguments arg Call" + }, + { + "library": "pandas", + "name": "values", + "source_code": "@property\ndef values(self) -> ArrayLike:\n data = self._data\n if isinstance(data, np.ndarray):\n data = data.view()\n data.flags.writeable = False\n return data", + "docstring": "Return an array representing the data in the Index. .. warning:: We recommend using :attr: or :meth:, depending on whether you need a reference to the underlying data or a NumPy array. .. versionchanged:: 3.0.0 The returned array is read-only. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. Examples -------- For :class:: >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.values array([1, 2, 3]) For :class:: >>> idx = pd.interval_range(start=0, end=5) >>> idx.values [(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]] Length: 5, dtype: interval[int64, right]", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:values arg:self arguments arg Assign If Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "add_meta_graph", + "source_code": "def add_meta_graph(self, tags, signature_def_map=None, assets_list=None, clear_devices=False, init_op=None, train_op=None, saver=None):\n if not self._has_saved_variables:\n raise AssertionError('Graph state including variables and assets has not been saved yet. Please invoke `add_meta_graph_and_variables()` first.')\n signature_def_map = signature_def_map or {}\n self._validate_signature_def_map(signature_def_map)\n _add_op_to_signature_def_map(signature_def_map, init_op, constants.INIT_OP_SIGNATURE_KEY)\n _add_op_to_signature_def_map(signature_def_map, train_op, constants.TRAIN_OP_SIGNATURE_KEY)\n saver = self._maybe_create_saver(saver)\n meta_graph_def = saver.export_meta_graph(clear_devices=clear_devices, strip_default_attrs=True)\n self._save_and_write_assets(meta_graph_def, assets_list)\n self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)", + "docstring": "Adds the current meta graph to the SavedModel. Creates a Saver in the current scope and uses the Saver to export the meta graph def. 
Invoking this API requires the API to have been invoked before. Args: tags: The set of tags to annotate the meta graph def with. signature_def_map: The map of signature defs to be added to the meta graph def. assets_list: Assets to be saved with SavedModel. Note that this list should be a subset of the assets saved as part of the first meta graph in the SavedModel. clear_devices: Set to true if the device info on the default graph should be cleared. init_op: Op or group of ops to execute when the graph is loaded. Note that when the init_op is specified it is run after the restore op at load-time. train_op: Op or group of opts that trains the model when run. This will not be run automatically when the graph is loaded, instead saved in a SignatureDef accessible through the exported MetaGraph. saver: An instance of tf.compat.v1.train.Saver that will be used to export the metagraph. If None, a sharded Saver that restores all variables will be used. Raises: AssertionError: If the variables for the SavedModel have not been saved yet, or if the graph already contains one or more legacy init ops.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py", + "ast_data": "FunctionDef name:add_meta_graph arg:self arg:tags arg:signature_def_map arg:assets_list arg:clear_devices arg:init_op arg:train_op arg:saver arguments arg arg arg arg arg arg arg arg If Raise Call Assign BoolOp Call Call Call Assign Call Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "recursively_deserialize_keras_object", + "source_code": "def recursively_deserialize_keras_object(config, module_objects=None):\n if isinstance(config, dict):\n if 'class_name' in config:\n return generic_utils.deserialize_keras_object(config, module_objects=module_objects)\n else:\n return {key: recursively_deserialize_keras_object(config[key], module_objects) for key in config}\n if isinstance(config, (tuple, list)):\n return [recursively_deserialize_keras_object(x, module_objects) for x in config]\n else:\n raise ValueError('Unable to decode config: {}'.format(config))", + "docstring": "Deserialize Keras object from a nested structure.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", + "ast_data": "FunctionDef name:recursively_deserialize_keras_object arg:config arg:module_objects arguments arg arg If Call If Compare Return return:yes Call Return return:yes Call If Call Return return:yes Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "create_mask", + "source_code": "def create_mask(mod_fn: Union[_score_mod_signature, _mask_mod_signature], B: Optional[int], H: Optional[int], Q_LEN: int, KV_LEN: int, device: DeviceLikeType='cuda') -> Tensor:\n if B is None:\n B = 1\n if H is None:\n H = 1\n b = torch.arange(0, B, device=device)\n h = torch.arange(0, H, device=device)\n m = torch.arange(0, Q_LEN, device=device)\n n = torch.arange(0, KV_LEN, device=device)\n mod_type = _get_mod_type(mod_fn)\n from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex\n with TransformGetItemToIndex():\n if mod_type == _ModificationType.SCORE_MOD:\n score_mod = mod_fn\n score_mod = _vmap_for_bhqkv(score_mod, prefix=(0,))\n out = score_mod(torch.zeros(B, H, Q_LEN, KV_LEN, device=device), b, h, m, n)\n mask = torch.where(torch.isneginf(out), False, True)\n return mask\n elif mod_type == _ModificationType.MASK_MOD:\n mask_mod = mod_fn\n mask_mod = _vmap_for_bhqkv(mask_mod, prefix=())\n mask = mask_mod(b, h, m, 
n)\n return mask\n else:\n raise AssertionError", + "docstring": "This function creates a mask tensor from a mod_fn function. Args: mod_fn (Union[_score_mod_signature, _mask_mod_signature]): Function to modify attention scores. B (int): Batch size. H (int): Number of query heads. Q_LEN (int): Sequence length of query. KV_LEN (int): Sequence length of key/value. device (str): Device to run the mask creation on. Returns: mask (Tensor): A mask tensor with shape (B, H, M, N).", + "type": "function", + "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py", + "ast_data": "FunctionDef name:create_mask arg:mod_fn arg:B arg:H arg:Q_LEN arg:KV_LEN arg:device arguments arg arg arg arg arg arg If Compare Assign If Compare Assign Assign Call Assign Call Assign Call Assign Call Assign Call With Call If Compare Assign Assign Call Assign Call Call Assign Call Call Return return:yes If Compare Assign Assign Call Assign Call Return return:yes Raise" + }, + { + "library": "django", + "name": "select_format", + "source_code": "def select_format(self, compiler, sql, params):\n return (sql, params)", + "docstring": "Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:select_format arg:self arg:compiler arg:sql arg:params arguments arg arg arg arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n check_is_fitted(self)\n input_features = _check_feature_names_in(self, input_features)\n return input_features[self.get_support()]", + "docstring": "Mask feature names according to selected features. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py", + "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "map", + "source_code": "def map(self, mapper, na_action: Literal['ignore'] | None=None) -> Self:\n is_map = isinstance(mapper, (abc.Mapping, ABCSeries))\n fill_val = self.fill_value\n if na_action is None or notna(fill_val):\n fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val)\n\n def func(sp_val):\n new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val)\n if new_sp_val is fill_val or new_sp_val == fill_val:\n msg = 'fill value in the sparse values not supported'\n raise ValueError(msg)\n return new_sp_val\n sp_values = [func(x) for x in self.sp_values]\n return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val)", + "docstring": "Map categories using an input mapping or function. Parameters ---------- mapper : dict, Series, callable The correspondence from old values to new. na_action : {None, 'ignore'}, default None If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- SparseArray The output array will have the same density as the input. 
The output fill value will be the result of applying the mapping to `` Examples -------- >>> arr = pd.arrays.SparseArray([0, 1, 2]) >>> arr.map(lambda x: x + 10) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map({0: 10, 1: 11, 2: 12}) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2])) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py", + "ast_data": "FunctionDef name:map arg:self arg:mapper arg:na_action arguments arg arg arg Assign Call Assign If BoolOp Compare Call Assign Call Call FunctionDef name:func arg:sp_val arguments arg Assign Call Call If BoolOp Compare Compare Assign Raise Call Return return:yes Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_maybe_mask_results", + "source_code": "def _maybe_mask_results(self, result: np.ndarray, fill_value=iNaT, convert=None) -> np.ndarray:\n if self._hasna:\n if convert:\n result = result.astype(convert)\n if fill_value is None:\n fill_value = np.nan\n np.putmask(result, self._isnan, fill_value)\n return result", + "docstring": "Parameters ---------- result : np.ndarray fill_value : object, default iNaT convert : str, dtype or None Returns ------- result : ndarray with values replace by the fill_value mask the result if needed, convert to the provided dtype if its not None This is an internal routine.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_maybe_mask_results arg:self arg:result arg:fill_value arg:convert arguments arg arg arg arg If If Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_test_step_fn", + "source_code": "def _test_step_fn(inputs):\n if isinstance(inputs, (tuple, list)) and len(inputs) == 2:\n inputs, targets = inputs\n else:\n targets = None\n distribute_lib.get_replica_context().merge_call(_build_model, args=(model, mode, inputs, targets))\n _, outputs, updates, _ = _per_replica_execution_function(dist_utils.get_distributed_model(model, mode), mode)\n with ops.control_dependencies([updates]):\n return [array_ops.identity(out) for out in outputs]", + "docstring": "A fn that returns output of single test step.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py", + "ast_data": "FunctionDef name:_test_step_fn arg:inputs arguments arg If BoolOp Call Compare Call Assign Assign Call Call Assign Call Call With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "static_nrows", + "source_code": "@property\ndef static_nrows(self):\n if self._row_splits is not None:\n nrows_plus_one = tensor_shape.dimension_value(self._row_splits.shape[0])\n if nrows_plus_one is not None:\n return nrows_plus_one - 1\n if self._row_lengths is not None:\n nrows = tensor_shape.dimension_value(self._row_lengths.shape[0])\n if nrows is not None:\n return nrows\n if self._nrows is not None:\n return tensor_util.constant_value(self._nrows)\n return None", + "docstring": "The number of rows in this partition, if statically known. 
Returns: The number of rows in this partition as an (if statically known); or (otherwise).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:static_nrows arg:self arguments arg If Compare Assign Call If Compare Return return:yes If Compare Assign Call If Compare Return return:yes If Compare Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "random_bernoulli", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef random_bernoulli(shape, p=0.0, dtype=None, seed=None):\n if dtype is None:\n dtype = floatx()\n if seed is None:\n seed = np.random.randint(10000000.0)\n return array_ops.where_v2(random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p, array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))", + "docstring": "Returns a tensor with random bernoulli distribution of values. Args: shape: A tuple of integers, the shape of tensor to create. p: A float, , probability of bernoulli distribution. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:random_bernoulli arg:shape arg:p arg:dtype arg:seed arguments arg arg arg arg If Compare Assign Call If Compare Assign Call Return return:yes Call Compare Call Call Call" + }, + { + "library": "django", + "name": "geom_output", + "source_code": "def geom_output(func, argtypes, offset=None):\n func.argtypes = argtypes\n if not offset:\n func.restype = c_void_p\n func.errcheck = check_geom\n else:\n func.restype = c_int\n\n def geomerrcheck(result, func, cargs):\n return check_geom_offset(result, func, cargs, offset)\n func.errcheck = geomerrcheck\n return func", + "docstring": "Generate a function that returns a Geometry either by reference or directly (if the return_geom keyword is set to True).", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py", + "ast_data": "FunctionDef name:geom_output arg:func arg:argtypes arg:offset arguments arg arg arg Assign If Assign Assign Assign FunctionDef name:geomerrcheck arg:result arg:func arg:cargs arguments arg arg arg Return return:yes Call Assign Return return:yes" + }, + { + "library": "django", + "name": "quote_name_unless_alias", + "source_code": "def quote_name_unless_alias(self, name):\n if name in self.quote_cache:\n return self.quote_cache[name]\n if name in self.query.alias_map and name not in self.query.table_map or name in self.query.extra_select or (self.query.external_aliases.get(name) and name not in self.query.table_map):\n self.quote_cache[name] = name\n return name\n r = self.connection.ops.quote_name(name)\n self.quote_cache[name] = r\n return r", + "docstring": "A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. 
PostgreSQL).", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\compiler.py", + "ast_data": "FunctionDef name:quote_name_unless_alias arg:self arg:name arguments arg arg If Compare Return return:yes If BoolOp BoolOp Compare Compare Compare BoolOp Call Compare Assign Return return:yes Assign Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "nlevels", + "source_code": "@property\ndef nlevels(self) -> int:\n return len(self._levels)", + "docstring": "Integer number of levels in this MultiIndex. See Also -------- MultiIndex.levels : Get the levels of the MultiIndex. MultiIndex.codes : Get the codes of the MultiIndex. MultiIndex.from_arrays : Convert arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. Examples -------- >>> mi = pd.MultiIndex.from_arrays([[\"a\"], [\"b\"], [\"c\"]]) >>> mi MultiIndex([('a', 'b', 'c')], ) >>> mi.nlevels 3", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:nlevels arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_reset_lazy_init", + "source_code": "def _reset_lazy_init(self) -> None:\n self._is_root: Optional[bool] = None", + "docstring": "Reset instance so :func: will run on the next forward.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:_reset_lazy_init arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "create_state", + "source_code": "def create_state(self, state_manager):\n pass", + "docstring": "Uses the to create state for the FeatureColumn. Args: state_manager: A to create / access resources such as lookup tables and variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py", + "ast_data": "FunctionDef name:create_state arg:self arg:state_manager arguments arg arg" + }, + { + "library": "tensorflow", + "name": "_overload_operator", + "source_code": "@classmethod\ndef _overload_operator(cls, tensor_class, operator):\n tensor_oper = getattr(tensor_class, operator)\n tensor_oper = getattr(tensor_oper, '__func__', tensor_oper)\n setattr(cls, operator, tensor_oper)", + "docstring": "Overload an operator with the same implementation as a base Tensor class. We pull the operator out of the class dynamically to avoid ordering issues. Args: tensor_class: The (Composite)Tensor to get the method from. operator: string. 
The operator name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", + "ast_data": "FunctionDef name:_overload_operator arg:cls arg:tensor_class arg:operator arguments arg arg arg Assign Call Assign Call Call" + }, + { + "library": "kornia", + "name": "channels_order", + "source_code": "@property\ndef channels_order(self) -> ChannelsOrder:\n return self.layout.channels_order", + "docstring": "Return the channels order.", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:channels_order arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "isdtype", + "source_code": "def isdtype(dtype: DType, kind: Union[DType, str, Tuple[Union[DType, str], ...]], *, _tuple=True) -> bool:\n if isinstance(kind, tuple) and _tuple:\n return _builtin_any((isdtype(dtype, k, _tuple=False) for k in kind))\n elif isinstance(kind, str):\n if kind == 'bool':\n return dtype == torch.bool\n elif kind == 'signed integer':\n return dtype in _int_dtypes and dtype.is_signed\n elif kind == 'unsigned integer':\n return dtype in _int_dtypes and (not dtype.is_signed)\n elif kind == 'integral':\n return dtype in _int_dtypes\n elif kind == 'real floating':\n return dtype.is_floating_point\n elif kind == 'complex floating':\n return dtype.is_complex\n elif kind == 'numeric':\n return isdtype(dtype, ('integral', 'real floating', 'complex floating'))\n else:\n raise ValueError(f'Unrecognized data type kind: {kind!r}')\n else:\n return dtype == kind", + "docstring": "Returns a boolean indicating whether a provided dtype is of a specified data type ``. Note that outside of this function, this compat library does not yet fully support complex numbers. See for more details", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\torch\\_aliases.py", + "ast_data": "FunctionDef name:isdtype arg:dtype arg:kind arguments arg arg arg If BoolOp Call Return return:yes Call Call If Call If Compare Return return:yes Compare If Compare Return return:yes BoolOp Compare If Compare Return return:yes BoolOp Compare If Compare Return return:yes Compare If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Call Raise Call Return return:yes Compare" + }, + { + "library": "django", + "name": "accepts", + "source_code": "def accepts(self, media_type):\n return self.accepted_type(media_type) is not None", + "docstring": "Does the client accept a response in the given media type?", + "type": "method", + "file_path": "django\\django\\http\\request.py", + "ast_data": "FunctionDef name:accepts arg:self arg:media_type arguments arg arg Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "_get_callback_model", + "source_code": "def _get_callback_model(self):\n if hasattr(self, '_replicated_model') and self._replicated_model:\n return self._replicated_model\n if hasattr(self, 'callback_model') and self.callback_model:\n return self.callback_model\n return self", + "docstring": "Returns the Callback Model for this Model.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:_get_callback_model arg:self arguments arg If BoolOp Call Return return:yes If BoolOp Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "values", + "source_code": "@property\ndef values(self):\n return self._values", + "docstring": "Returns the 
per replica values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_node_get", + "source_code": "def _node_get(node: _C.Node, key: str):\n assert isinstance(node, _C.Node)\n sel = node.kindOf(key)\n return getattr(node, sel)(key)", + "docstring": "Gets attributes of a node which is polymorphic over return type.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py", + "ast_data": "FunctionDef name:_node_get arg:node arg:key arguments arg arg Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "ceil_to_int", + "source_code": "def ceil_to_int(self, x: T, dtype: torch.dtype) -> T:\n raise NotImplementedError", + "docstring": "Convert x to dtype with ceiling semantics. See also trunc_to_int.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:ceil_to_int arg:self arg:x arg:dtype arguments arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "get", + "source_code": "def get(self, o: Any) -> Any:\n return getattr(o, self.inner_name)", + "docstring": "Get the inner tensor attribute", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:get arg:self arg:o arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_minor_formatter", + "source_code": "def set_minor_formatter(self, formatter):\n self._set_formatter(formatter, self.minor)", + "docstring": "Set the formatter of the minor ticker. In addition to a instance, this also accepts a `.Axis.set_major_formatter~matplotlib.ticker.Formatter`, or function", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:set_minor_formatter arg:self arg:formatter arguments arg arg Call" + }, + { + "library": "cherrypy", + "name": "redirect", + "source_code": "def redirect(url='', internal=True, debug=False):\n if debug:\n cherrypy.log('Redirecting %sto: %s' % ({True: 'internal ', False: ''}[internal], url), 'TOOLS.REDIRECT')\n if internal:\n raise cherrypy.InternalRedirect(url)\n else:\n raise cherrypy.HTTPRedirect(url)", + "docstring": "Raise InternalRedirect or HTTPRedirect to the given url.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", + "ast_data": "FunctionDef name:redirect arg:url arg:internal arg:debug arguments arg arg arg If Call If Raise Call Raise Call" + }, + { + "library": "authlib", + "name": "prepare_grant_uri", + "source_code": "def prepare_grant_uri(uri, client_id, response_type, redirect_uri=None, scope=None, state=None, **kwargs):\n params = [('response_type', response_type), ('client_id', client_id)]\n if redirect_uri:\n params.append(('redirect_uri', redirect_uri))\n if scope:\n params.append(('scope', list_to_scope(scope)))\n if state:\n params.append(('state', state))\n for k in kwargs:\n if kwargs[k] is not None:\n params.append((to_unicode(k), kwargs[k]))\n return add_params_to_uri(uri, params)", + "docstring": "Prepare the authorization grant request URI. 
The client constructs the request URI by adding the following parameters to the query component of the authorization endpoint URI using the `Section 2.2Section 3.1.2Section 3.3Section 10.12Section 2.2Section 3.1.2Section 3.3section 10.12`:", + "type": "function", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\parameters.py", + "ast_data": "FunctionDef name:prepare_grant_uri arg:uri arg:client_id arg:response_type arg:redirect_uri arg:scope arg:state arguments arg arg arg arg arg arg arg Assign If Call If Call Call If Call For If Compare Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "convolution_matrix", + "source_code": "def convolution_matrix(a, n, mode='full'):\n if n <= 0:\n raise ValueError('n must be a positive integer.')\n a = np.asarray(a)\n if a.size == 0:\n raise ValueError('len(a) must be at least 1.')\n if mode not in ('full', 'valid', 'same'):\n raise ValueError(\"'mode' argument must be one of ('full', 'valid', 'same')\")\n if a.ndim > 1:\n return np.apply_along_axis(lambda a: convolution_matrix(a, n, mode), -1, a)\n az = np.pad(a, (0, n - 1), 'constant')\n raz = np.pad(a[::-1], (0, n - 1), 'constant')\n if mode == 'same':\n trim = min(n, len(a)) - 1\n tb = trim // 2\n te = trim - tb\n col0 = az[tb:len(az) - te]\n row0 = raz[-n - tb:len(raz) - tb]\n elif mode == 'valid':\n tb = min(n, len(a)) - 1\n te = tb\n col0 = az[tb:len(az) - te]\n row0 = raz[-n - tb:len(raz) - tb]\n else:\n col0 = az\n row0 = raz[-n:]\n return toeplitz(col0, row0)", + "docstring": "Construct a convolution matrix. Constructs the Toeplitz matrix representing one-dimensional convolution [1]_. See the notes below for details. Parameters ---------- a : (..., m) array_like The 1-D array to convolve. N-dimensional arrays are treated as a batch: each slice along the last axis is a 1-D array to convolve. n : int The number of columns in the resulting matrix. It gives the length of the input to be convolved with . This is analogous to the length of in `modemodekmodeAnmodeAAnumpy.convolve` for the same coefficients and size. >>> convolution_matrix([-1, 4, -2], 5, mode='full') array([[-1, 0, 0, 0, 0], [ 4, -1, 0, 0, 0], [-2, 4, -1, 0, 0], [ 0, -2, 4, -1, 0], [ 0, 0, -2, 4, -1], [ 0, 0, 0, -2, 4], [ 0, 0, 0, 0, -2]]) >>> convolution_matrix([-1, 4, -2], 5, mode='valid') array([[-2, 4, -1, 0, 0], [ 0, -2, 4, -1, 0], [ 0, 0, -2, 4, -1]])", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_special_matrices.py", + "ast_data": "FunctionDef name:convolution_matrix arg:a arg:n arg:mode arguments arg arg arg If Compare Raise Call Assign Call If Compare Raise Call If Compare Raise Call If Compare Return return:yes Call arguments arg Call Assign Call Assign Call If Compare Assign Call Call Assign Assign Assign Call Assign Call If Compare Assign Call Call Assign Assign Call Assign Call Assign Assign Return return:yes Call" + }, + { + "library": "django", + "name": "load_handler", + "source_code": "def load_handler(path, *args, **kwargs):\n return import_string(path)(*args, **kwargs)", + "docstring": "Given a path to a handler, return an instance of that handler. E.g.:: >>> from django.http import HttpRequest >>> request = HttpRequest() >>> load_handler( ... 'django.core.files.uploadhandler.TemporaryFileUploadHandler', ... request, ... 
)", + "type": "function", + "file_path": "django\\django\\core\\files\\uploadhandler.py", + "ast_data": "FunctionDef name:load_handler arg:path arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "eig", + "source_code": "@tf_export('linalg.eig', 'eig', v1=[])\n@dispatch.add_dispatch_support\ndef eig(tensor, name=None):\n if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64:\n out_dtype = dtypes.complex64\n elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128:\n out_dtype = dtypes.complex128\n e, v = gen_linalg_ops.eig(tensor, Tout=out_dtype, compute_v=True, name=name)\n return (e, v)", + "docstring": "Computes the eigen decomposition of a batch of matrices. The eigenvalues and eigenvectors for a non-Hermitian matrix in general are complex. The eigenvectors are not guaranteed to be linearly independent. Computes the eigenvalues and right eigenvectors of the innermost N-by-N matrices in such that , for i=0...N-1. Args: tensor: of shape . Only the lower triangular part of each inner inner matrix is referenced. name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is . The eigenvalues are not necessarily ordered. v: Eigenvectors. Shape is . The columns of the inner most matrices contain eigenvectors of the corresponding matrices in", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py", + "ast_data": "FunctionDef name:eig arg:tensor arg:name arguments arg arg If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "ImageClassifierTrainer", + "source_code": "class ImageClassifierTrainer(Trainer):\n\n def compute_metrics(self, *args: Tensor) -> Dict[str, float]:\n if len(args) != 2:\n raise AssertionError\n out, target = args\n acc1, acc5 = accuracy(out, target, topk=(1, 5))\n return {'top1': acc1.item(), 'top5': acc5.item()}", + "docstring": "Module to be used for image classification purposes. The module subclasses :py:class: and overrides the :py:func: function implementing a standard :py:func: topk@[1, 5]. .. seealso:: Learn how to use this class in the following __.", + "type": "class", + "file_path": "kornia\\kornia\\x\\trainers.py", + "ast_data": "ClassDef name:ImageClassifierTrainer FunctionDef name:compute_metrics arg:self arguments arg arg If Compare Call Raise Assign Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "set_other_cuda_vars", + "source_code": "def set_other_cuda_vars(environ_cp):\n if environ_cp.get('TF_CUDA_CLANG') == '1':\n write_to_bazelrc('build --config=cuda_clang')\n else:\n write_to_bazelrc('build --config=cuda')", + "docstring": "Set other CUDA related variables.", + "type": "function", + "file_path": "tensorflow\\configure.py", + "ast_data": "FunctionDef name:set_other_cuda_vars arg:environ_cp arguments arg If Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "floor_to_int", + "source_code": "def floor_to_int(self, x: T, dtype: torch.dtype) -> T:\n raise NotImplementedError", + "docstring": "Convert x to dtype with ceiling semantics. 
See also trunc_to_int.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:floor_to_int arg:self arg:x arg:dtype arguments arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "transform_var", + "source_code": "def transform_var(tensor, counter, dimension_dict):\n if isinstance(tensor, TensorType):\n res = []\n for t in tensor.__args__:\n transformed, counter = transform_dimension(t, counter, dimension_dict)\n res.append(transformed)\n assert len(res) <= 4\n if len(tensor.__args__) == 1:\n return (tensor_type.tensor1(res[0]), counter)\n elif len(tensor.__args__) == 2:\n return (tensor_type.tensor2(res[0], res[1]), counter)\n elif len(tensor.__args__) == 3:\n return (tensor_type.tensor3(res[0], res[1], res[2]), counter)\n elif len(tensor.__args__) == 4:\n return (tensor_type.tensor4(res[0], res[1], res[2], res[3]), counter)\n elif tensor == Dyn:\n return (z3_dyn, counter)\n elif isinstance(tensor, TVar):\n return (z3.Const(tensor.tvar, tensor_type), counter)", + "docstring": "Transforms tensor variables to a format understood by z3 Args: tensor: Tensor variable or a tensor type potentially with variable dimensions Returns: Transformed variable to a z3 format", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py", + "ast_data": "FunctionDef name:transform_var arg:tensor arg:counter arg:dimension_dict arguments arg arg arg If Call Assign For Assign Call Call Compare Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call If Compare Return return:yes If Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "lil_array", + "source_code": "class lil_array(_lil_base, sparray):\n pass", + "docstring": "Row-based LIst of Lists sparse array. This is a structure for constructing sparse arrays incrementally. Note that inserting a single item can take linear time in the worst case; to construct the array efficiently, make sure the items are pre-sorted by index, per row. This can be instantiated in several ways: lil_array(D) where D is a 2-D ndarray lil_array(S) with another sparse array or matrix S (equivalent to S.tolil()) lil_array((M, N), [dtype]) to construct an empty array with shape (M, N) dtype is optional, defaulting to dtype='d'. Attributes ---------- dtype : dtype Data type of the array shape : 2-tuple Shape of the array ndim : int Number of dimensions (this is always 2) nnz size data LIL format data array of the array rows LIL format row index array of the array T Notes ----- Sparse arrays can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. 
Advantages of the LIL format - supports flexible slicing - changes to the array sparsity structure are efficient Disadvantages of the LIL format - arithmetic operations LIL + LIL are slow (consider CSR or CSC) - slow column slicing (consider CSC) - slow matrix vector products (consider CSR or CSC) Intended Usage - LIL is a convenient format for constructing sparse arrays - once an array has been constructed, convert to CSR or CSC format for fast arithmetic and matrix vector operations - consider using the COO format when constructing large arrays Data Structure - An array (``.", + "type": "class", + "file_path": "scipy\\scipy\\sparse\\_lil.py", + "ast_data": "ClassDef name:lil_array" + }, + { + "library": "tensorflow", + "name": "if_stmt", + "source_code": "def if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts):\n if tensors.is_dense_tensor(cond):\n _tf_if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts)\n else:\n _py_if_stmt(cond, body, orelse)", + "docstring": "Functional form of an if statement. The conditional operates on a state, which includes all symbols whose values are a function of the branch taken. For example, given the code below that calculates the abs function: The state is represented by the variable . The orelseset_statexnonlocal`. The inputs and outputs of the callables representing the loop blocks are not explicit - instead, these functions must use nonlocal/global for side effects. The inputs and outputs are instead controlled by the set_state/get_state functions. Args: cond: Boolean. body: Callable representing the main block of the conditional. orelse: Callable representing the else block of the conditional. get_state: Function that returns a tuple containing the values of all composite symbols modified within the conditional. This allows access to state that branches may mutate through side effects. This function is not needed and should not be called when dispatching to code matching Python's default semantics. This is useful for checkpointing to avoid unintended side-effects when staging requires evaluating all code-paths. set_state: Function to set the values of all composite symbols modified within the conditional. This is the complement to get_state, used to restore checkpointed values. The single argument a tuple containing values for each composite symbol that may be modified in a branch of the conditional. The is usually the result of a call to get_state. symbol_names: Tuple containing basic loop var names. nouts: Number of variables output by the statement. Vars which are not outputs will not be passed through staged control flow such as tf.cond. This includes variables that are defined before the conditional, but are not used after it.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:if_stmt arg:cond arg:body arg:orelse arg:get_state arg:set_state arg:symbol_names arg:nouts arguments arg arg arg arg arg arg arg If Call Call Call" + }, + { + "library": "kornia", + "name": "exp", + "source_code": "@staticmethod\ndef exp(theta: Tensor) -> So2:\n check_so2_theta_shape(theta)\n return So2(complex(theta.cos(), theta.sin()))", + "docstring": "Convert elements of lie algebra to elements of lie group. Args: theta: angle in radians of shape :math: or :math:. 
Example: >>> v = torch.tensor([3.1415/2]) >>> s = So2.exp(v) >>> s Parameter containing: tensor([4.6329e-05+1.j], requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py", + "ast_data": "FunctionDef name:exp arg:theta arguments arg Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_split_trackables", + "source_code": "def _split_trackables(trackable_data: List[_TrackableData]) -> Tuple[List[_TrackableData], List[_TrackableData], Dict[str, List[_TrackableData]]]:\n tensor_trackables = []\n pystate_trackables = []\n registered_trackables = collections.defaultdict(list)\n for td in trackable_data:\n saver_name = registration.get_registered_saver_name(td.object_to_save)\n if isinstance(td.object_to_save, python_state.PythonState):\n pystate_trackables.append(td)\n elif saver_name:\n registered_trackables[saver_name].append(td)\n else:\n tensor_trackables.append(td)\n return (tensor_trackables, pystate_trackables, registered_trackables)", + "docstring": "Splits Trackables into 3 categories (tensor/pystate/registered).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py", + "ast_data": "FunctionDef name:_split_trackables arg:trackable_data arguments arg Assign Assign Assign Call For Assign Call If Call Call If Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "derivative", + "source_code": "def derivative(self, n=1):\n with FITPACK_LOCK:\n tck = _fitpack_impl.splder(self._eval_args, n)\n ext = 1 if self.ext == 3 else self.ext\n return UnivariateSpline._from_tck(tck, ext=ext)", + "docstring": "Construct a new spline representing the derivative of this spline. Parameters ---------- n : int, optional Order of derivative to evaluate. Default: 1 Returns ------- spline : UnivariateSpline Spline of order k2=k-n representing the derivative of this spline. See Also -------- splder, antiderivative Notes ----- .. versionadded:: 0.13.0 Examples -------- This can be used for finding maxima of a curve: >>> import numpy as np >>> from scipy.interpolate import UnivariateSpline >>> x = np.linspace(0, 10, 70) >>> y = np.sin(x) >>> spl = UnivariateSpline(x, y, k=4, s=0) Now, differentiate the spline and find the zeros of the derivative. 
(NB: only works for order 3 splines, so we fit an order 4 spline): >>> spl.derivative().roots() / np.pi array([ 0.50000001, 1.5 , 2.49999998]) This agrees well with roots :math: of :math:.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py", + "ast_data": "FunctionDef name:derivative arg:self arg:n arguments arg arg With Assign Call Assign Compare Return return:yes Call" + }, + { + "library": "django", + "name": "_add_local_translations", + "source_code": "def _add_local_translations(self):\n for localedir in reversed(settings.LOCALE_PATHS):\n translation = self._new_gnu_trans(localedir)\n self.merge(translation)", + "docstring": "Merge translations defined in LOCALE_PATHS.", + "type": "method", + "file_path": "django\\django\\utils\\translation\\trans_real.py", + "ast_data": "FunctionDef name:_add_local_translations arg:self arguments arg For Call Assign Call Call" + }, + { + "library": "pandas", + "name": "decode", + "source_code": "def decode(self, encoding, errors: str='strict', dtype: str | DtypeObj | None=None):\n if dtype is not None and (not is_string_dtype(dtype)):\n raise ValueError(f'dtype must be string or object, got dtype={dtype!r}')\n if dtype is None and get_option('future.infer_string'):\n dtype = 'str'\n if encoding in _cpython_optimized_decoders:\n f = lambda x: x.decode(encoding, errors)\n else:\n decoder = codecs.getdecoder(encoding)\n f = lambda x: decoder(x, errors)[0]\n arr = self._data.array\n result = arr._str_map(f)\n return self._wrap_result(result, dtype=dtype)", + "docstring": "Decode character string in the Series/Index using indicated encoding. Equivalent to :meth: in python2 and :meth: in python3. Parameters ---------- encoding : str Specifies the encoding to be used. errors : str, optional Specifies the error handling scheme. Possible values are those supported by :meth:. dtype : str or dtype, optional The dtype of the result. When not ``. .. versionadded:: 2.3.0 Returns ------- Series or Index A Series or Index with decoded strings. See Also -------- Series.str.encode : Encodes strings into bytes in a Series/Index. Examples -------- For Series: >>> ser = pd.Series([b\"cow\", b\"123\", b\"()\"]) >>> ser.str.decode(\"ascii\") 0 cow 1 123 2 () dtype: object", + "type": "method", + "file_path": "pandas\\pandas\\core\\strings\\accessor.py", + "ast_data": "FunctionDef name:decode arg:self arg:encoding arg:errors arg:dtype arguments arg arg arg arg If BoolOp Compare Call Raise Call If BoolOp Compare Call Assign If Compare Assign arguments arg Call Assign Call Assign arguments arg Call Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_intra_op_parallelism_threads", + "source_code": "@tf_export('config.threading.set_intra_op_parallelism_threads')\ndef set_intra_op_parallelism_threads(num_threads):\n context.context().intra_op_parallelism_threads = num_threads", + "docstring": "Set number of threads used within an individual op for parallelism. Certain operations like matrix multiplication and reductions can utilize parallel threads for speed ups. A value of 0 means the system picks an appropriate number. 
Args: num_threads: Number of parallel threads", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:set_intra_op_parallelism_threads arg:num_threads arguments arg Assign Call Call" + }, + { + "library": "scipy", + "name": "mode", + "source_code": "def mode(self, df, scale):\n dim, df, scale = self._process_parameters(df, scale)\n out = self._mode(dim, df, scale)\n return _squeeze_output(out) if out is not None else out", + "docstring": "Mode of the Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix. Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float or None The Mode of the distribution", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:mode arg:self arg:df arg:scale arguments arg arg arg Assign Call Assign Call Return return:yes Compare Call" + }, + { + "library": "pandas", + "name": "factorize", + "source_code": "def factorize(self, use_na_sentinel: bool=True) -> tuple[np.ndarray, ExtensionArray]:\n arr, na_value = self._values_for_factorize()\n codes, uniques = factorize_array(arr, use_na_sentinel=use_na_sentinel, na_value=na_value)\n uniques_ea = self._from_factorized(uniques, self)\n return (codes, uniques_ea)", + "docstring": "Encode the extension array as an enumerated type. Parameters ---------- use_na_sentinel : bool, default True If True, the sentinel -1 will be used for NaN values. If False, NaN values will be encoded as non-negative integers and will not drop the NaN from the uniques of the values. .. versionadded:: 1.5.0 Returns ------- codes : ndarray An integer NumPy array that's an indexer into the original ExtensionArray. uniques : ExtensionArray An ExtensionArray containing the unique values of . .. note:: uniques will *not* contain an entry for the NA value of the ExtensionArray if there are any missing values present in . See Also -------- factorize : Top-level factorize method that dispatches here. Notes ----- :meth: offers a keyword as well. Examples -------- >>> idx1 = pd.PeriodIndex( ... [\"2014-01\", \"2014-01\", \"2014-02\", \"2014-02\", \"2014-03\", \"2014-03\"], ... freq=\"M\", ... 
) >>> arr, idx = idx1.factorize() >>> arr array([0, 0, 1, 1, 2, 2]) >>> idx PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]')", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:factorize arg:self arg:use_na_sentinel arguments arg arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "TD", + "source_code": "def TD(types, f=None, astype=None, in_=None, out=None, cfunc_alias=None, dispatch=None):\n if f is not None:\n if isinstance(f, str):\n func_data = build_func_data(types, f)\n elif len(f) != len(types):\n raise ValueError('Number of types and f do not match')\n else:\n func_data = f\n else:\n func_data = (None,) * len(types)\n if isinstance(in_, str):\n in_ = (in_,) * len(types)\n elif in_ is None:\n in_ = (None,) * len(types)\n elif len(in_) != len(types):\n raise ValueError('Number of types and inputs do not match')\n if isinstance(out, str):\n out = (out,) * len(types)\n elif out is None:\n out = (None,) * len(types)\n elif len(out) != len(types):\n raise ValueError('Number of types and outputs do not match')\n tds = []\n for t, fd, i, o in zip(types, func_data, in_, out):\n if dispatch:\n dispt = ([k for k, v in dispatch if t in v] + [None])[0]\n else:\n dispt = None\n tds.append(TypeDescription(t, f=fd, in_=i, out=o, astype=astype, cfunc_alias=cfunc_alias, dispatch=dispt))\n return tds", + "docstring": "Generate a TypeDescription instance for each item in types", + "type": "function", + "file_path": "numpy\\numpy\\_core\\code_generators\\generate_umath.py", + "ast_data": "FunctionDef name:TD arg:types arg:f arg:astype arg:in_ arg:out arg:cfunc_alias arg:dispatch arguments arg arg arg arg arg arg arg If Compare If Call Assign Call If Compare Call Call Raise Call Assign Assign Call If Call Assign Call If Compare Assign Call If Compare Call Call Raise Call If Call Assign Call If Compare Assign Call If Compare Call Call Raise Call Assign For Call If Assign Compare Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_options_tensor_to_options", + "source_code": "@classmethod\ndef _options_tensor_to_options(cls, serialized_options):\n options = options_lib.Options()\n if tensor_util.constant_value(serialized_options) is not None:\n pb = dataset_options_pb2.Options.FromString(tensor_util.constant_value(serialized_options))\n options._from_proto(pb)\n return options", + "docstring": "Converts options tensor to tf.data.Options object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:_options_tensor_to_options arg:cls arg:serialized_options arguments arg arg Assign Call If Compare Call Assign Call Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "fresh_env_used", + "source_code": "@property\ndef fresh_env_used(self) -> bool | None:\n return self._fresh_env_used", + "docstring": "True/False as to whether a new environment was created for this build, or None if the environment has not been initialised yet.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:fresh_env_used arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "eigvals", + "source_code": "@_apply_over_batch(('a', 2), ('b', 2))\ndef eigvals(a, b=None, overwrite_a=False, check_finite=True, homogeneous_eigvals=False):\n return eig(a, b=b, left=0, right=0, overwrite_a=overwrite_a, 
check_finite=check_finite, homogeneous_eigvals=homogeneous_eigvals)", + "docstring": "Compute eigenvalues from an ordinary or generalized eigenvalue problem. Find eigenvalues of a general matrix:: a vr[:,i] = w[i] b vr[:,i] Parameters ---------- a : (M, M) array_like A complex or real matrix whose eigenvalues and eigenvectors will be computed. b : (M, M) array_like, optional Right-hand side matrix in a generalized eigenvalue problem. If omitted, identity matrix is assumed. overwrite_a : bool, optional Whether to overwrite data in a (may improve performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. homogeneous_eigvals : bool, optional If True, return the eigenvalues in homogeneous coordinates. In this case ``. Raises ------ LinAlgError If eigenvalue computation does not converge See Also -------- eig : eigenvalues and right eigenvectors of general arrays. eigvalsh : eigenvalues of symmetric or Hermitian arrays eigvals_banded : eigenvalues for symmetric/Hermitian band matrices eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal matrices Examples -------- >>> import numpy as np >>> from scipy import linalg >>> a = np.array([[0., -1.], [1., 0.]]) >>> linalg.eigvals(a) array([0.+1.j, 0.-1.j]) >>> b = np.array([[0., 1.], [1., 1.]]) >>> linalg.eigvals(a, b) array([ 1.+0.j, -1.+0.j]) >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]]) >>> linalg.eigvals(a, homogeneous_eigvals=True) array([[3.+0.j, 8.+0.j, 7.+0.j], [1.+0.j, 1.+0.j, 1.+0.j]])", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_decomp.py", + "ast_data": "FunctionDef name:eigvals arg:a arg:b arg:overwrite_a arg:check_finite arg:homogeneous_eigvals arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "put_back", + "source_code": "def put_back(self, closure):\n assert closure.tag is None\n with self._queue_lock:\n if self._inflight_closure_count < 1:\n raise AssertionError('There is no inflight closures to put_back.')\n if self._error:\n closure.mark_cancelled()\n else:\n self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())\n self._queue.put(closure, block=False)\n metric_utils.monitor_int('queued_closures', self._queue.qsize())\n self._closures_queued_condition.notify()\n self.inflight_closure_count -= 1\n if self._inflight_closure_count == 0:\n self._no_inflight_closure_condition.notify_all()", + "docstring": "Put the closure back into the queue as it was not properly executed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:put_back arg:self arg:closure arguments arg arg Compare With If Compare Raise Call If Call Call arguments Call Call Call Call Call If Compare Call" + }, + { + "library": "pytorch", + "name": "complete_wheel", + "source_code": "def complete_wheel(folder: str) -> str:\n wheel_name = list_dir(f'/{folder}/dist')[0]\n if 'pytorch' in folder and (not enable_cuda):\n print('Repairing Wheel with AuditWheel')\n check_call(['auditwheel', 'repair', f'dist/{wheel_name}'], cwd=folder)\n repaired_wheel_name = list_dir(f'/{folder}/wheelhouse')[0]\n print(f'Moving {repaired_wheel_name} wheel to /{folder}/dist')\n os.rename(f'/{folder}/wheelhouse/{repaired_wheel_name}', f'/{folder}/dist/{repaired_wheel_name}')\n else:\n 
repaired_wheel_name = wheel_name.replace('linux_aarch64', 'manylinux_2_28_aarch64')\n print(f'Renaming {wheel_name} wheel to {repaired_wheel_name}')\n os.rename(f'/{folder}/dist/{wheel_name}', f'/{folder}/dist/{repaired_wheel_name}')\n print(f'Copying {repaired_wheel_name} to artifacts')\n shutil.copy2(f'/{folder}/dist/{repaired_wheel_name}', f'/artifacts/{repaired_wheel_name}')\n return repaired_wheel_name", + "docstring": "Complete wheel build and put in artifact location", + "type": "function", + "file_path": "pytorch\\.ci\\aarch64_linux\\aarch64_wheel_ci_build.py", + "ast_data": "FunctionDef name:complete_wheel arg:folder arguments arg Assign Call If BoolOp Compare Call Call Assign Call Call Call Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "half", + "source_code": "def half(self):\n return self._to(torch.half)", + "docstring": "Casts this storage to half type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:half arg:self arguments arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "create_temporary_credential", + "source_code": "def create_temporary_credential(self, request):\n raise NotImplementedError()", + "docstring": "Generate and save a temporary credential into database or cache. A temporary credential is used for exchanging token credential. This method should be re-implemented:: def create_temporary_credential(self, request): oauth_token = generate_token(36) oauth_token_secret = generate_token(48) temporary_credential = TemporaryCredential( oauth_token=oauth_token, oauth_token_secret=oauth_token_secret, client_id=request.client_id, redirect_uri=request.redirect_uri, ) # if the credential has a save method temporary_credential.save() return temporary_credential :param request: OAuth1Request instance :return: TemporaryCredential instance", + "type": "method", + "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py", + "ast_data": "FunctionDef name:create_temporary_credential arg:self arg:request arguments arg arg Raise Call" + }, + { + "library": "authlib", + "name": "revoke_access_token", + "source_code": "def revoke_access_token(self, token, request):\n raise NotImplementedError()", + "docstring": "Revoke a token access in case an invalid client has been requested. Developers MUST implement this method in subclass:: def revoke_access_token(self, token, request): token.revoked = True token.save()", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py", + "ast_data": "FunctionDef name:revoke_access_token arg:self arg:token arg:request arguments arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "max", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef max(x, axis=None, keepdims=False):\n return math_ops.reduce_max(x, axis, keepdims)", + "docstring": "Maximum value in a tensor. Args: x: A tensor or variable. axis: An integer, the axis to find maximum values. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. 
Returns: A tensor with maximum values of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:max arg:x arg:axis arg:keepdims arguments arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_unstack", + "source_code": "def _unstack(self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_]):\n new_values, mask = unstacker.get_new_values(self.values.T, fill_value=fill_value)\n mask = mask.any(0)\n new_values = new_values.T[mask]\n new_placement = new_placement[mask]\n bp = BlockPlacement(new_placement)\n blocks = [new_block_2d(new_values, placement=bp)]\n return (blocks, mask)", + "docstring": "Return a list of unstacked blocks of self Parameters ---------- unstacker : reshape._Unstacker fill_value : int Only used in ExtensionBlock._unstack new_placement : np.ndarray[np.intp] allow_fill : bool needs_masking : np.ndarray[bool] Returns ------- blocks : list of Block New blocks of unstacked values. mask : array-like of bool The mask of columns of we should keep.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:_unstack arg:self arg:unstacker arg:fill_value arg:new_placement arg:needs_masking arguments arg arg arg arg arg Assign Call Assign Call Assign Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_gather_to", + "source_code": "def _gather_to(self, value, destinations, axis, options=None):\n _require_cross_replica_or_default_context_extended(self)\n assert not isinstance(destinations, (list, tuple))\n if options is None:\n options = collective_util.Options()\n return self._gather_to_implementation(value, destinations, axis, options)", + "docstring": "Gather across replicas along axis-th dimension to . gathers or -like object, along -th dimension. It supports only dense tensors but NOT sparse tensor. This API can only be called in cross-replica context. Args: value: a , or a like object. destinations: a , a , a alike object, or a device string. It specifies the devices to reduce to. To perform an all-gather, pass the same to and . Note that if it's a , the value is reduced to the devices of that variable, and this method doesn't update the variable. axis: 0-D int32 Tensor. Dimension along which to gather. Must be in the range [0, rank(value)). options: a . Options to perform collective operations. This overrides the default options if the takes one in the constructor. See for details of the options. 
Returns: A tensor or value gathered to .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:_gather_to arg:self arg:value arg:destinations arg:axis arg:options arguments arg arg arg arg arg Call Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_build_y", + "source_code": "def _build_y(self, X, y, sample_weight, trim_duplicates=True):\n self._check_input_data_shape(X)\n X = X.reshape(-1)\n if self.increasing == 'auto':\n self.increasing_ = check_increasing(X, y)\n else:\n self.increasing_ = self.increasing\n sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\n mask = sample_weight > 0\n X, y, sample_weight = (X[mask], y[mask], sample_weight[mask])\n order = np.lexsort((y, X))\n X, y, sample_weight = [array[order] for array in [X, y, sample_weight]]\n unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight)\n X = unique_X\n y = isotonic_regression(unique_y, sample_weight=unique_sample_weight, y_min=self.y_min, y_max=self.y_max, increasing=self.increasing_)\n self.X_min_, self.X_max_ = (np.min(X), np.max(X))\n if trim_duplicates:\n keep_data = np.ones((len(y),), dtype=bool)\n keep_data[1:-1] = np.logical_or(np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:]))\n return (X[keep_data], y[keep_data])\n else:\n return (X, y)", + "docstring": "Build the y_ IsotonicRegression.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\isotonic.py", + "ast_data": "FunctionDef name:_build_y arg:self arg:X arg:y arg:sample_weight arg:trim_duplicates arguments arg arg arg arg arg Call Assign Call If Compare Assign Call Assign Assign Call Assign Compare Assign Assign Call Assign Assign Call Assign Assign Call Assign Call Call If Assign Call Call Assign Call Call Call Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "calculate_areas", + "source_code": "def calculate_areas(self):\n if self._dim == 2:\n return self._calculate_areas_2d()\n elif self._dim == 3:\n return self._calculate_areas_3d()\n else:\n raise TypeError('Only supported for 2D and 3D point sets')", + "docstring": "Calculates the areas of the Voronoi regions. For 2D point sets, the regions are circular arcs. The sum of the areas is ``. .. versionadded:: 1.5.0 Returns ------- areas : double array of shape (npoints,) The areas of the Voronoi regions.", + "type": "method", + "file_path": "scipy\\scipy\\spatial\\_spherical_voronoi.py", + "ast_data": "FunctionDef name:calculate_areas arg:self arguments arg If Compare Return return:yes Call If Compare Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "initial_seed", + "source_code": "def initial_seed() -> int:\n return default_generator.initial_seed()", + "docstring": "Returns the initial seed for generating random numbers as a Python . .. 
note:: The returned seed is for the default generator on CPU only.", + "type": "function", + "file_path": "pytorch\\torch\\random.py", + "ast_data": "FunctionDef name:initial_seed arguments Return return:yes Call" + }, + { + "library": "pandas", + "name": "concat_horizontal", + "source_code": "@classmethod\ndef concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:\n offset = 0\n blocks: list[Block] = []\n for mgr in mgrs:\n for blk in mgr.blocks:\n nb = blk.slice_block_columns(slice(None))\n nb._mgr_locs = nb._mgr_locs.add(offset)\n blocks.append(nb)\n offset += len(mgr.items)\n new_mgr = cls(tuple(blocks), axes)\n return new_mgr", + "docstring": "Concatenate uniformly-indexed BlockManagers horizontally.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:concat_horizontal arg:cls arg:mgrs arg:axes arguments arg arg arg Assign For For Assign Call Call Assign Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "receive_data_chunk", + "source_code": "def receive_data_chunk(self, raw_data, start):\n if self.activated:\n self.file.write(raw_data)\n else:\n return raw_data", + "docstring": "Add the data to the BytesIO file.", + "type": "method", + "file_path": "django\\django\\core\\files\\uploadhandler.py", + "ast_data": "FunctionDef name:receive_data_chunk arg:self arg:raw_data arg:start arguments arg arg arg If Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "intersects_path", + "source_code": "def intersects_path(self, other, filled=True):\n return _path.path_intersects_path(self, other, filled)", + "docstring": "Return whether if this path intersects another given path. If *filled* is True, then this also returns True if one path completely encloses the other (i.e., the paths are treated as filled).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\path.py", + "ast_data": "FunctionDef name:intersects_path arg:self arg:other arg:filled arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "from_tensors", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef from_tensors(self, tensors):\n return super().from_tensors(tensors)", + "docstring": "See tf.types.experimental.TraceType base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:from_tensors arg:self arg:tensors arguments arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_check_unsampled_image", + "source_code": "def _check_unsampled_image(self):\n return False", + "docstring": "Return False. 
Do not use unsampled image.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:_check_unsampled_image arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "flush", + "source_code": "def flush(self):\n self.clear()\n self.delete()\n self._session_key = None", + "docstring": "Remove the current session data from the database and regenerate the key.", + "type": "method", + "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", + "ast_data": "FunctionDef name:flush arg:self arguments arg Call Call Assign" + }, + { + "library": "sphinx", + "name": "get_refnodes", + "source_code": "def get_refnodes(self, doctree: Node, result: list[dict[str, Any]]) -> list[dict[str, Any]]:\n if isinstance(doctree, nodes.reference) and doctree.get('refuri'):\n refuri = doctree['refuri']\n if refuri.startswith(('http://', 'https://', 'irc:', 'mailto:')):\n return result\n classes = doctree.parent.attributes['classes']\n for level in range(8, 0, -1):\n if self.toctree_template % level in classes:\n result.append({'level': level, 'refuri': html.escape(refuri), 'text': ssp(html.escape(doctree.astext()))})\n break\n elif isinstance(doctree, nodes.Element):\n for elem in doctree:\n result = self.get_refnodes(elem, result)\n return result", + "docstring": "Collect section titles, their depth in the toc and the refuri.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", + "ast_data": "FunctionDef name:get_refnodes arg:self arg:doctree arg:result arguments arg arg arg If BoolOp Call Call Assign If Call Return return:yes Assign For Call If Compare Call Call Call Call Call If Call For Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "DixonPrice", + "source_code": "class DixonPrice(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.custom_bounds = [(-2, 3), (-2, 3)]\n self.global_optimum = [[2.0 ** (-(2.0 ** i - 2.0) / 2.0 ** i) for i in range(1, self.N + 1)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n i = arange(2, self.N + 1)\n s = i * (2.0 * x[1:] ** 2.0 - x[:-1]) ** 2.0\n return sum(s) + (x[0] - 1.0) ** 2.0", + "docstring": "Dixon and Price objective function. This class defines the Dixon and Price global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{DixonPrice}}(x) = (x_i - 1)^2 + \\sum_{i=2}^n i(2x_i^2 - x_{i-1})^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: Gavana code not correct. 
i array should start from 2.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py", + "ast_data": "ClassDef name:DixonPrice Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "reset_cudagraph_trees", + "source_code": "def reset_cudagraph_trees() -> None:\n container_dict = get_obj(local, 'tree_manager_containers')\n locks_dict = get_obj(local, 'tree_manager_locks')\n for device, lock in locks_dict.items():\n with lock:\n container = container_dict.get(device)\n if not container or not container.tree_manager:\n continue\n container.tree_manager.shutdown()\n _set_cached_tensors_enabled(False)\n container_dict.clear()\n MarkStepBox.mark_step_counter = 0", + "docstring": "Clear all cudagraph trees", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:reset_cudagraph_trees arguments Assign Call Assign Call For Call With Assign Call If BoolOp Call Call Call Assign" + }, + { + "library": "scikit-learn", + "name": "_compute_interactions", + "source_code": "def _compute_interactions(self, node):\n allowed_features = set()\n interaction_cst_indices = []\n for i in node.interaction_cst_indices:\n if node.split_info.feature_idx in self.interaction_cst[i]:\n interaction_cst_indices.append(i)\n allowed_features.update(self.interaction_cst[i])\n return (np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)), interaction_cst_indices)", + "docstring": "Compute features allowed by interactions to be inherited by child nodes. Example: Assume constraints [{0, 1}, {1, 2}]. 1 <- Both constraint groups could be applied from now on / \\ 1 2 <- Left split still fulfills both constraint groups. / \\ / \\ Right split at feature 2 has only group {1, 2} from now on. LightGBM uses the same logic for overlapping groups. See for details. Parameters: ---------- node : TreeNode A node that might have children. Based on its feature_idx, the interaction constraints for possible child nodes are computed. Returns ------- allowed_features : ndarray, dtype=uint32 Indices of features allowed to split for children. interaction_cst_indices : list of ints Indices of the interaction sets that have to be applied on splits of child nodes. The fewer sets the stronger the constraint as fewer sets contain fewer features.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py", + "ast_data": "FunctionDef name:_compute_interactions arg:self arg:node arguments arg arg Assign Call Assign For If Compare Call Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "round", + "source_code": "def round(self, decimals: int=0) -> Self | Index:\n if decimals >= 0:\n return self.copy()\n elif self.start % 10 ** (-decimals) == 0 and self.step % 10 ** (-decimals) == 0:\n return self.copy()\n else:\n return super().round(decimals=decimals)", + "docstring": "Round each value in the Index to the given number of decimals. Parameters ---------- decimals : int, optional Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point e.g. ``. Returns ------- Index or RangeIndex A new Index with the rounded values. 
Examples -------- >>> import pandas as pd >>> idx = pd.RangeIndex(10, 30, 10) >>> idx.round(decimals=-1) RangeIndex(start=10, stop=30, step=10) >>> idx = pd.RangeIndex(10, 15, 1) >>> idx.round(decimals=-1) Index([10, 10, 10, 10, 10], dtype='int64')", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\range.py", + "ast_data": "FunctionDef name:round arg:self arg:decimals arguments arg arg If Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Call" + }, + { + "library": "cryptography", + "name": "parse", + "source_code": "def parse(self) -> Name:\n if not self._has_data():\n return Name([])\n rdns = [self._parse_rdn()]\n while self._has_data():\n self._read_char(',')\n rdns.append(self._parse_rdn())\n return Name(reversed(rdns))", + "docstring": "Parses the string and converts it to a Name. According to RFC4514 section 2.1 the RDNSequence must be reversed when converting to string representation. So, when we parse it, we need to reverse again to get the RDNs on the correct order.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\x509\\name.py", + "ast_data": "FunctionDef name:parse arg:self arguments arg If Call Return return:yes Call Assign Call While Call Call Call Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "get_srid", + "source_code": "def get_srid(self, obj):\n srid = obj.srid\n if srid is None or self.srid == -1 or (srid == -1 and self.srid != -1):\n return self.srid\n else:\n return srid", + "docstring": "Return the default SRID for the given geometry or raster, taking into account the SRID set for the field. For example, if the input geometry or raster doesn't have an SRID, then the SRID of the field will be returned.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py", + "ast_data": "FunctionDef name:get_srid arg:self arg:obj arguments arg arg Assign If BoolOp Compare Compare BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_clip_path", + "source_code": "def set_clip_path(self, path, transform=None):\n from matplotlib.patches import Patch, Rectangle\n success = False\n if transform is None:\n if isinstance(path, Rectangle):\n self.clipbox = TransformedBbox(Bbox.unit(), path.get_transform())\n self._clippath = None\n success = True\n elif isinstance(path, Patch):\n self._clippath = TransformedPatchPath(path)\n success = True\n elif isinstance(path, tuple):\n path, transform = path\n if path is None:\n self._clippath = None\n success = True\n elif isinstance(path, Path):\n self._clippath = TransformedPath(path, transform)\n success = True\n elif isinstance(path, TransformedPatchPath):\n self._clippath = path\n success = True\n elif isinstance(path, TransformedPath):\n self._clippath = path\n success = True\n if not success:\n raise TypeError(f'Invalid arguments to set_clip_path, of type {type(path).__name__} and {type(transform).__name__}')\n self.pchanged()\n self.stale = True", + "docstring": "Set the artist's clip path. Parameters ---------- path : or or or None The clip path. If given a , *transform* must be provided as well. If *None*, a previously set clip path is removed. transform : , optional Only used if *path* is a , in which case the given is converted to a using *transform*. 
Notes ----- For efficiency, if *path* is a this method will set the clipping box to the corresponding rectangle and set the clipping path to `~.Artist.set`), a tuple (*path*, *transform*) is also accepted as a single positional parameter. .. ACCEPTS: Patch or (Path, Transform) or None", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:set_clip_path arg:self arg:path arg:transform arguments arg arg arg Assign If Compare If Call Assign Call Call Call Assign Assign If Call Assign Call Assign If Call Assign If Compare Assign Assign If Call Assign Call Assign If Call Assign Assign If Call Assign Assign If Raise Call Call Call Call Assign" + }, + { + "library": "tensorflow", + "name": "location_protos", + "source_code": "def location_protos(self):\n return self._location_key_to_location.values()", + "docstring": "Returns list of protos.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py", + "ast_data": "FunctionDef name:location_protos arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "state_size", + "source_code": "@property\ndef state_size(self):\n raise NotImplementedError('Abstract method')", + "docstring": "size(s) of state(s) used by this cell. It can be represented by an Integer, a TensorShape or a tuple of Integers or TensorShapes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", + "ast_data": "FunctionDef name:state_size arg:self arguments arg Raise Call" + }, + { + "library": "django", + "name": "clear_checkbox_name", + "source_code": "def clear_checkbox_name(self, name):\n return name + '-clear'", + "docstring": "Given the name of the file input, return the name of the clear checkbox input.", + "type": "method", + "file_path": "django\\django\\forms\\widgets.py", + "ast_data": "FunctionDef name:clear_checkbox_name arg:self arg:name arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "enclosing_tpu_context", + "source_code": "def enclosing_tpu_context():\n return enclosing_tpu_context_and_graph()[0]", + "docstring": "Returns the TPUReplicateContext, which exists inside a tpu.rewrite().", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_util.py", + "ast_data": "FunctionDef name:enclosing_tpu_context arguments Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_flatten_tensors", + "source_code": "def _flatten_tensors(tensors):\n if not tensors:\n raise ValueError('tensors cannot be empty')\n shape = tensors[0].shape\n for tensor in tensors:\n shape = shape.merge_with(tensor.shape)\n if not shape.is_fully_defined():\n raise ValueError('Tensors must have statically known shape.')\n if len(shape) != 1:\n reshaped = []\n for t in tensors:\n with ops.colocate_with(t):\n reshaped.append(array_ops.reshape(t, [-1]))\n tensors = reshaped\n return (tensors, shape)", + "docstring": "Check tensors for isomorphism and flatten. Args: tensors: list of which must all have the same shape. 
Returns: tensors: a list of which are flattened (1D) views of tensors shape: the original shape of each element of input tensors Raises: ValueError: tensors are empty or non-isomorphic or have unknown shape.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:_flatten_tensors arg:tensors arguments arg If Raise Call Assign For Assign Call If Call Raise Call If Compare Call Assign For With Call Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "is_grad_dtype", + "source_code": "def is_grad_dtype(dtype: torch.dtype) -> bool:\n return dtype.is_floating_point or is_complex_dtype(dtype)", + "docstring": "Checks if the dtype can require a gradient.", + "type": "function", + "file_path": "pytorch\\torch\\_prims_common\\__init__.py", + "ast_data": "FunctionDef name:is_grad_dtype arg:dtype arguments arg Return return:yes BoolOp Call" + }, + { + "library": "tensorflow", + "name": "_get_kind_name", + "source_code": "def _get_kind_name(item):\n if isinstance(item, (str, bytes)):\n kind = 'bytes_list'\n elif isinstance(item, int):\n kind = 'int64_list'\n elif isinstance(item, float):\n kind = 'float_list'\n elif isinstance(item, Any):\n kind = 'any_list'\n else:\n kind = 'node_list'\n return kind", + "docstring": "Returns the kind name in CollectionDef. Args: item: A data item. Returns: The string representation of the kind in CollectionDef.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py", + "ast_data": "FunctionDef name:_get_kind_name arg:item arguments arg If Call Assign If Call Assign If Call Assign If Call Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_filternorm", + "source_code": "def set_filternorm(self, filternorm):\n self._filternorm = bool(filternorm)\n self.stale = True", + "docstring": "Set whether the resize filter normalizes the weights. See help for . Parameters ---------- filternorm : bool", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:set_filternorm arg:self arg:filternorm arguments arg arg Assign Call Assign" + }, + { + "library": "pandas", + "name": "unpack_1tuple", + "source_code": "def unpack_1tuple(tup):\n if len(tup) == 1 and isinstance(tup[0], slice):\n if isinstance(tup, list):\n raise ValueError('Indexing with a single-item list containing a slice is not allowed. Pass a tuple instead.')\n return tup[0]\n return tup", + "docstring": "If we have a length-1 tuple/list that contains a slice, unpack to just the slice. 
Notes ----- The list case is deprecated.", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexers\\utils.py", + "ast_data": "FunctionDef name:unpack_1tuple arg:tup arguments arg If BoolOp Compare Call Call If Call Raise Call Return return:yes Return return:yes" + }, + { + "library": "sphinx", + "name": "is_vector_graphics", + "source_code": "def is_vector_graphics(self, filename: str) -> bool:\n ext = os.path.splitext(filename)[-1]\n return ext in VECTOR_GRAPHICS_EXTENSIONS", + "docstring": "Does the filename extension indicate a vector graphic format?", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", + "ast_data": "FunctionDef name:is_vector_graphics arg:self arg:filename arguments arg arg Assign Call Return return:yes Compare" + }, + { + "library": "authlib", + "name": "validate_ui_locales_supported", + "source_code": "def validate_ui_locales_supported(self):\n validate_array_value(self, 'ui_locales_supported')", + "docstring": "OPTIONAL. Languages and scripts supported for the user interface, represented as a JSON array of language tag values from BCP 47 [RFC5646]. If omitted, the set of supported languages and scripts is unspecified.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", + "ast_data": "FunctionDef name:validate_ui_locales_supported arg:self arguments arg Call" + }, + { + "library": "authlib", + "name": "get_server_metadata", + "source_code": "def get_server_metadata(self):\n raise NotImplementedError()", + "docstring": "Return server metadata which includes supported grant types, response types and etc.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py", + "ast_data": "FunctionDef name:get_server_metadata arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "clear", + "source_code": "def clear(self):\n self._primary.clear()\n self._dispatch_dict.clear()", + "docstring": "Removes all functions from the cache.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_cache.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg Call Call" + }, + { + "library": "scikit-learn", + "name": "add_self_request", + "source_code": "def add_self_request(self, obj):\n if getattr(obj, '_type', None) == 'metadata_request':\n self._self_request = deepcopy(obj)\n elif hasattr(obj, '_get_metadata_request'):\n self._self_request = deepcopy(obj._get_metadata_request())\n else:\n raise ValueError('Given `obj` is neither a `MetadataRequest` nor does it implement the required API. Inheriting from `BaseEstimator` implements the required API.')\n return self", + "docstring": "Add (as a consumer) to the routing. This method is used if the router is also a consumer, and hence the router itself needs to be included in the routing. The passed object can be an estimator or a :class:. A router should add itself using this method instead of since it should be treated differently than the other objects to which metadata is routed by the router. Parameters ---------- obj : object This is typically the router instance, i.e. 
in a `self`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", + "ast_data": "FunctionDef name:add_self_request arg:self arg:obj arguments arg arg If Compare Call Assign Call If Call Assign Call Call Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "run", + "source_code": "def run(self, f, jac, y0, t0, t1, f_params, jac_params):\n raise NotImplementedError('all integrators must define run(f, jac, t0, t1, y0, f_params, jac_params)')", + "docstring": "Integrate from t=t0 to t=t1 using y0 as an initial condition. Return 2-tuple (y1,t1) where y1 is the result and t=t1 defines the stoppage coordinate of the result.", + "type": "method", + "file_path": "scipy\\scipy\\integrate\\_ode.py", + "ast_data": "FunctionDef name:run arg:self arg:f arg:jac arg:y0 arg:t0 arg:t1 arg:f_params arg:jac_params arguments arg arg arg arg arg arg arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "set_clip_path", + "source_code": "def set_clip_path(self, path):\n _api.check_isinstance((transforms.TransformedPath, None), path=path)\n self._clippath = path", + "docstring": "Set the clip path to a or None.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:set_clip_path arg:self arg:path arguments arg arg Call Assign" + }, + { + "library": "tensorflow", + "name": "device_name_by_id", + "source_code": "def device_name_by_id(self, device_id):\n return self._device_by_id[device_id].device_name", + "docstring": "Get the name of a device by the debugger-generated ID of the device.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:device_name_by_id arg:self arg:device_id arguments arg arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_antialiased", + "source_code": "def get_antialiased(self):\n return self._antialiased", + "docstring": "Return whether antialiased rendering is used.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:get_antialiased arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "CosineSimilarity", + "source_code": "class CosineSimilarity(MeanMetricWrapper):\n\n def __init__(self, name='cosine_similarity', dtype=None, axis=-1):\n super(CosineSimilarity, self).__init__(cosine_similarity, name, dtype=dtype, axis=axis)", + "docstring": "Computes the cosine similarity between the labels and predictions. See: [Cosine Similarity]( This metric keeps the average cosine similarity between and over a stream of data. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. axis: (Optional) Defaults to -1. The dimension along which the cosine similarity is computed. Standalone usage: >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]] >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]] >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]] >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1)) >>> # = ((0. + 0.) + (0.5 + 0.5)) / 2 >>> m = tf.keras.metrics.CosineSimilarity(axis=1) >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]]) >>> m.result().numpy() 0.49999997 >>> m.reset_state() >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]], ... 
sample_weight=[0.3, 0.7]) >>> m.result().numpy() 0.6999999 Usage with API:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "ClassDef name:CosineSimilarity FunctionDef name:__init__ arg:self arg:name arg:dtype arg:axis arguments arg arg arg arg Call Call" + }, + { + "library": "numpy", + "name": "close", + "source_code": "def close(self):\n if self.zip is not None:\n self.zip.close()\n self.zip = None\n if self.fid is not None:\n self.fid.close()\n self.fid = None\n self.f = None", + "docstring": "Close the file.", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_npyio_impl.py", + "ast_data": "FunctionDef name:close arg:self arguments arg If Compare Call Assign If Compare Call Assign Assign" + }, + { + "library": "authlib", + "name": "get_op_key", + "source_code": "def get_op_key(self, operation):\n self.check_key_op(operation)\n if operation in self.PUBLIC_KEY_OPS:\n return self.get_public_key()\n return self.get_private_key()", + "docstring": "Get the raw key for the given key_op. This method will also check if the given key_op is supported by this key. :param operation: key operation value, such as \"sign\", \"encrypt\". :return: raw key", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7517\\asymmetric_key.py", + "ast_data": "FunctionDef name:get_op_key arg:self arg:operation arguments arg arg Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_parametrized", + "source_code": "def is_parametrized(module: Module, tensor_name: Optional[str]=None) -> bool:\n parametrizations = getattr(module, 'parametrizations', None)\n if parametrizations is None or not isinstance(parametrizations, ModuleDict):\n return False\n if tensor_name is None:\n return len(parametrizations) > 0\n else:\n return tensor_name in parametrizations", + "docstring": "Determine if a module has a parametrization. Args: module (nn.Module): module to query tensor_name (str, optional): name of the parameter in the module Default: `moduletensor_nametensor_name`", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py", + "ast_data": "FunctionDef name:is_parametrized arg:module arg:tensor_name arguments arg arg Assign Call If BoolOp Compare Call Return return:yes If Compare Return return:yes Compare Call Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "_add_op_to_registry", + "source_code": "def _add_op_to_registry(registry, op, fn):\n overloads: list[Union[torch._ops.OperatorBase]] = []\n if isinstance(op, HigherOrderOperator):\n registry[op] = fn\n return\n elif isinstance(op, OpOverload):\n overloads.append(op)\n else:\n assert isinstance(op, OpOverloadPacket)\n for ol in op.overloads():\n overloads.append(getattr(op, ol))\n for op_overload in overloads:\n if op_overload in registry:\n raise RuntimeError(f'duplicate registrations for {op_overload}')\n if torch._C._dispatch_has_kernel(op_overload.name()):\n registry[op_overload] = fn", + "docstring": "This is an internal API for adding an op to the decomposition table. If op is OpOverload, it will be added to the registry directly. 
If op is OpOverloadPacket, all the valid op_overloads in the packet will be added to the registry.", + "type": "function", + "file_path": "pytorch\\torch\\_decomp\\__init__.py", + "ast_data": "FunctionDef name:_add_op_to_registry arg:registry arg:op arg:fn arguments arg arg arg If Call Assign Return return:no If Call Call Call For Call Call Call For If Compare Raise Call If Call Call Assign" + }, + { + "library": "pandas", + "name": "_get_combined_index", + "source_code": "def _get_combined_index(indexes: list[Index], intersect: bool=False, sort: bool=False) -> Index:\n indexes = _get_distinct_objs(indexes)\n if len(indexes) == 0:\n index: Index = default_index(0)\n elif len(indexes) == 1:\n index = indexes[0]\n elif intersect:\n index = indexes[0]\n for other in indexes[1:]:\n index = index.intersection(other)\n else:\n index = union_indexes(indexes, sort=False)\n index = ensure_index(index)\n if sort:\n index = safe_sort_index(index)\n return index", + "docstring": "Return the union or intersection of indexes. Parameters ---------- indexes : list of Index or list objects When intersect=True, do not accept list of lists. intersect : bool, default False If True, calculate the intersection between indexes. Otherwise, calculate the union. sort : bool, default False Whether the result index should come out sorted or not. Returns ------- Index", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexes\\api.py", + "ast_data": "FunctionDef name:_get_combined_index arg:indexes arg:intersect arg:sort arguments arg arg arg Assign Call If Compare Call Call If Compare Call Assign If Assign For Assign Call Assign Call Assign Call If Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "weekend_to_monday", + "source_code": "def weekend_to_monday(dt: datetime) -> datetime:\n if dt.weekday() == 6:\n return dt + timedelta(1)\n elif dt.weekday() == 5:\n return dt + timedelta(2)\n return dt", + "docstring": "If holiday falls on Sunday or Saturday, use day thereafter (Monday) instead. Needed for holidays such as Christmas observation in Europe", + "type": "function", + "file_path": "pandas\\pandas\\tseries\\holiday.py", + "ast_data": "FunctionDef name:weekend_to_monday arg:dt arguments arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_resource_variable", + "source_code": "@tf_export('__internal__.ops.is_resource_variable', v1=[])\ndef is_resource_variable(var):\n return isinstance(var, BaseResourceVariable) or hasattr(var, '_should_act_as_resource_variable')", + "docstring": "\"Returns True if is to be considered a ResourceVariable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:is_resource_variable arg:var arguments arg Return return:yes BoolOp Call Call Call" + }, + { + "library": "tensorflow", + "name": "no_gradient", + "source_code": "@deprecation.deprecated_endpoints('NotDifferentiable', 'NoGradient')\n@tf_export('no_gradient', v1=['no_gradient', 'NotDifferentiable', 'NoGradient'])\ndef no_gradient(op_type: str) -> None:\n if not isinstance(op_type, str):\n raise TypeError('op_type must be a string')\n gradient_registry.register(None, op_type)", + "docstring": "Specifies that ops of type is not differentiable. This function should *not* be used for operations that have a well-defined gradient that is not yet implemented. This function is only used when defining a new op type. 
It may be used for ops such as that are not differentiable. For example: The gradient computed for 'op_type' will then propagate zeros. For ops that have a well-defined gradient but are not yet implemented, no declaration should be made, and an error *must* be thrown if an attempt to request its gradient is made. Args: op_type: The string type of an operation. This corresponds to the field for the proto that defines the operation. Raises: TypeError: If is not a string.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:no_gradient arg:op_type arguments arg If Call Raise Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "minposx", + "source_code": "@property\ndef minposx(self):\n return self._minpos[0]", + "docstring": "The minimum positive value in the *x*-direction within the Bbox. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors, and will be used as the minimum *x*-extent instead of *x0*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:minposx arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "run", + "source_code": "def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n return self._sess.run(fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata)", + "docstring": "Run ops in the monitored session. This method is completely compatible with the method. Args: fetches: Same as . feed_dict: Same as . options: Same as . run_metadata: Same as . Returns: Same as .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:run arg:self arg:fetches arg:feed_dict arg:options arg:run_metadata arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "unbatch", + "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.unbatch()`.')\n@tf_export('data.experimental.unbatch')\ndef unbatch():\n\n def _apply_fn(dataset):\n return dataset.unbatch()\n return _apply_fn", + "docstring": "Splits elements of a dataset into multiple elements on the batch dimension. For example, if elements of the dataset are shaped , where may vary for each input element, then for each element in the dataset, the unbatched dataset will contain consecutive elements of shape . Returns: A transformation function, which can be passed to .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\batching.py", + "ast_data": "FunctionDef name:unbatch arguments FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "GroupMember", + "source_code": "class GroupMember(metaclass=_WorldMeta):\n NON_GROUP_MEMBER = -100", + "docstring": "Group member class.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "ClassDef name:GroupMember Assign" + }, + { + "library": "tensorflow", + "name": "result_type", + "source_code": "def result_type(*arrays_and_dtypes):\n return _result_type_impl(*arrays_and_dtypes)", + "docstring": "Determine the result promotion dtype using the JNP-like promotion system. Args: *arrays_and_dtypes: A list of Tensors, Variables, NumPy arrays or python numbers. 
Returns: The result promotion type from all the inputs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\flexible_dtypes.py", + "ast_data": "FunctionDef name:result_type arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "deduce_output_dtype_by_name", + "source_code": "def deduce_output_dtype_by_name(op_name: str, *args: Any, **kwargs: Any) -> Optional[torch.dtype]:\n if op_name in boolean_ops():\n return torch.bool\n elif op_name in ('to_dtype', 'index_expr'):\n return kwargs['dtype'] if 'dtype' in kwargs else args[-1]\n elif op_name in ('rand', 'randn'):\n return torch.float\n elif op_name in ('get_index', 'randint64', 'load_seed'):\n return torch.int64\n elif op_name == 'reduction':\n return kwargs['dtype'] if 'dtype' in kwargs else args[1]\n elif op_name == 'constant':\n return kwargs['dtype'] if 'dtype' in kwargs else args[-1]\n elif op_name in ('load', 'store', 'store_reduction'):\n buf_name = args[1]\n return V.graph.get_dtype(buf_name)\n elif op_name == 'to_dtype_bitcast':\n return kwargs['dtype'] if 'dtype' in kwargs else args[-2]\n return None", + "docstring": "Given op name and a list of input dtypes, deduce the output dtype", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py", + "ast_data": "FunctionDef name:deduce_output_dtype_by_name arg:op_name arguments arg arg arg If Compare Call Return return:yes If Compare Return return:yes Compare If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Compare If Compare Return return:yes Compare If Compare Assign Return return:yes Call If Compare Return return:yes Compare Return return:no" + }, + { + "library": "scikit-learn", + "name": "_compute_loss_grad", + "source_code": "def _compute_loss_grad(self, layer, sw_sum, activations, deltas, coef_grads, intercept_grads):\n coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer])\n coef_grads[layer] += self.alpha * self.coefs_[layer]\n coef_grads[layer] /= sw_sum\n intercept_grads[layer] = np.sum(deltas[layer], axis=0) / sw_sum", + "docstring": "Compute the gradient of loss with respect to coefs and intercept for specified layer. This function does backpropagation for the specified one layer.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py", + "ast_data": "FunctionDef name:_compute_loss_grad arg:self arg:layer arg:sw_sum arg:activations arg:deltas arg:coef_grads arg:intercept_grads arguments arg arg arg arg arg arg arg Assign Call Assign Call" + }, + { + "library": "sphinx", + "name": "visit_Try", + "source_code": "def visit_Try(self, node: ast.Try) -> None:\n for subnode in node.body:\n self.visit(subnode)\n for subnode in node.orelse:\n self.visit(subnode)", + "docstring": "Handles Try node and processes body and else-clause. .. 
note:: pycode parser ignores objects definition in except-clause.", + "type": "method", + "file_path": "sphinx\\sphinx\\pycode\\parser.py", + "ast_data": "FunctionDef name:visit_Try arg:self arg:node arguments arg arg For Call For Call" + }, + { + "library": "scipy", + "name": "read_full_array", + "source_code": "def read_full_array(self, hdr):\n if hdr.is_complex:\n res = self.read_sub_array(hdr, copy=False)\n res_j = self.read_sub_array(hdr, copy=False)\n return res + res_j * 1j\n return self.read_sub_array(hdr)", + "docstring": "Full (rather than sparse) matrix getter Read matrix (array) can be real or complex Parameters ---------- hdr : `` is True, otherwise a real numeric array", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", + "ast_data": "FunctionDef name:read_full_array arg:self arg:hdr arguments arg arg If Assign Call Assign Call Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "apply", + "source_code": "def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n assert not model_kwargs\n return (tuple((arg for arg in model_args if arg is not None)), {})", + "docstring": "Remove from arguments. Args: model_args: The model args. model_kwargs: The model kwargs. model: The PyTorch model. Returns: A tuple of the model args and kwargs. Raises: ValueError: If is not empty.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", + "ast_data": "FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes Call Compare" + }, + { + "library": "scipy", + "name": "_order_cluster_tree", + "source_code": "def _order_cluster_tree(Z):\n q = deque()\n tree = to_tree(Z)\n q.append(tree)\n nodes = []\n while q:\n node = q.popleft()\n if not node.is_leaf():\n bisect.insort_left(nodes, node)\n q.append(node.get_right())\n q.append(node.get_left())\n return nodes", + "docstring": "Return clustering nodes in bottom-up order by distance. Parameters ---------- Z : scipy.cluster.linkage array The linkage matrix. 
Returns ------- nodes : list A list of ClusterNode objects.", + "type": "function", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:_order_cluster_tree arg:Z arguments arg Assign Call Assign Call Call Assign While Assign Call If Call Call Call Call Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "ModPythonServer", + "source_code": "class ModPythonServer(object):\n template = '\\n# Apache2 server configuration file for running CherryPy with mod_python.\\n\\nDocumentRoot \"/\"\\nListen %(port)s\\nLoadModule python_module modules/mod_python.so\\n\\n\\n SetHandler python-program\\n PythonHandler %(handler)s\\n PythonDebug On\\n%(opts)s\\n\\n'\n\n def __init__(self, loc='/', port=80, opts=None, apache_path='apache', handler='cherrypy._cpmodpy::handler'):\n self.loc = loc\n self.port = port\n self.opts = opts\n self.apache_path = apache_path\n self.handler = handler\n\n def start(self):\n opts = ''.join([' PythonOption %s %s\\n' % (k, v) for k, v in self.opts])\n conf_data = self.template % {'port': self.port, 'loc': self.loc, 'opts': opts, 'handler': self.handler}\n mpconf = os.path.join(os.path.dirname(__file__), 'cpmodpy.conf')\n with open(mpconf, 'wb') as f:\n f.write(conf_data)\n response = read_process(self.apache_path, '-k start -f %s' % mpconf)\n self.ready = True\n return response\n\n def stop(self):\n os.popen('apache -k stop')\n self.ready = False", + "docstring": "A server wrapper for ``.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\_cpmodpy.py", + "ast_data": "ClassDef name:ModPythonServer Assign FunctionDef name:__init__ arg:self arg:loc arg:port arg:opts arg:apache_path arg:handler arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:start arg:self arguments arg Assign Call Assign Assign Call Call With Call Call Assign Call Assign Return return:yes FunctionDef name:stop arg:self arguments arg Call Assign" + }, + { + "library": "sphinx", + "name": "add_role", + "source_code": "def add_role(self, name: str, role: Any, override: bool=False) -> None:\n logger.debug('[app] adding role: %r', (name, role))\n if not override and docutils.is_role_registered(name):\n logger.warning(__('role %r is already registered and will not be overridden'), name, type='app', subtype='add_role')\n docutils.register_role(name, role)", + "docstring": "Register a Docutils role. :param name: The name of role :param role: A role function :param override: If false, do not install it if another role is already installed as the same name If true, unconditionally install the role. For more details about role functions, see __ . .. 
versionchanged:: 1.8 Add *override* keyword.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:add_role arg:self arg:name arg:role arg:override arguments arg arg arg arg Call If BoolOp Call Call Call Call" + }, + { + "library": "cherrypy", + "name": "VirtualHost", + "source_code": "def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True, **domains):\n from cherrypy.lib import httputil\n\n def vhost_dispatch(path_info):\n request = cherrypy.serving.request\n header = request.headers.get\n domain = header('Host', '')\n if use_x_forwarded_host:\n domain = header('X-Forwarded-Host', domain)\n prefix = domains.get(domain, '')\n if prefix:\n path_info = httputil.urljoin(prefix, path_info)\n result = next_dispatcher(path_info)\n section = request.config.get('tools.staticdir.section')\n if section:\n section = section[len(prefix):]\n request.config['tools.staticdir.section'] = section\n return result\n return vhost_dispatch", + "docstring": "Select a different handler based on the Host header. This can be useful when running multiple sites within one CP server. It allows several domains to point to different parts of a single website structure. For example:: -> root -> root/domain2/ -> root/secure can be accomplished via the following config:: [/] request.dispatch = cherrypy.dispatch.VirtualHost( **{'www.domain2.example': '/domain2', 'www.domain2.example:443': '/secure', }) next_dispatcher The next dispatcher object in the dispatch chain. The VirtualHost dispatcher adds a prefix to the URL and calls another dispatcher. Defaults to cherrypy.dispatch.Dispatcher(). use_x_forwarded_host If True (the default), any \"X-Forwarded-Host\" request header will be used instead of the \"Host\" header. This is commonly added by HTTP servers (such as Apache) when proxying. `` A dict of {host header value: virtual prefix} pairs. The incoming \"Host\" request header is looked up in this dict, and, if a match is found, the corresponding \"virtual prefix\" value will be prepended to the URL path before calling the next dispatcher. Note that you often need separate entries for \"example.com\" and \"www.example.com\". 
In addition, \"Host\" headers may contain the port number.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\_cpdispatch.py", + "ast_data": "FunctionDef name:VirtualHost arg:next_dispatcher arg:use_x_forwarded_host arguments arg arg arg Call FunctionDef name:vhost_dispatch arg:path_info arguments arg Assign Assign Assign Call If Assign Call Assign Call If Assign Call Assign Call Assign Call If Assign Call Assign Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "invert", + "source_code": "def invert(self) -> Self:\n if self.filter is not None:\n self.filter = (self.filter[0], self.generate_filter_op(invert=True), self.filter[2])\n return self", + "docstring": "invert the filter", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\pytables.py", + "ast_data": "FunctionDef name:invert arg:self arguments arg If Compare Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "leggauss", + "source_code": "def leggauss(deg):\n ideg = pu._as_int(deg, 'deg')\n if ideg <= 0:\n raise ValueError('deg must be a positive integer')\n c = np.array([0] * deg + [1])\n m = legcompanion(c)\n x = la.eigvalsh(m)\n dy = legval(x, c)\n df = legval(x, legder(c))\n x -= dy / df\n fm = legval(x, c[1:])\n fm /= np.abs(fm).max()\n df /= np.abs(df).max()\n w = 1 / (fm * df)\n w = (w + w[::-1]) / 2\n x = (x - x[::-1]) / 2\n w *= 2.0 / w.sum()\n return (x, w)", + "docstring": "Gauss-Legendre quadrature. Computes the sample points and weights for Gauss-Legendre quadrature. These sample points and weights will correctly integrate polynomials of degree :math: or less over the interval :math: with the weight function :math:. Parameters ---------- deg : int Number of sample points and weights. It must be >= 1. Returns ------- x : ndarray 1-D ndarray containing the sample points. y : ndarray 1-D ndarray containing the weights. Notes ----- The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) where :math: is a constant independent of :math: and :math: is the k'th root of :math:, and then scaling the results to get the right value when integrating 1.", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:leggauss arg:deg arguments arg Assign Call If Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "run_monitored_proc", + "source_code": "def run_monitored_proc(code):\n if not sys.platform.startswith('linux'):\n raise RuntimeError('Peak memory monitoring only works on Linux')\n code = textwrap.dedent(code)\n process = subprocess.Popen([sys.executable, '-c', code])\n peak_memusage = -1\n start = time.time()\n while True:\n ret = process.poll()\n if ret is not None:\n break\n with open(f'/proc/{process.pid}/status') as f:\n procdata = f.read()\n m = re.search('VmRSS:\\\\s*(\\\\d+)\\\\s*kB', procdata, re.S | re.I)\n if m is not None:\n memusage = float(m.group(1)) * 1000.0\n peak_memusage = max(memusage, peak_memusage)\n time.sleep(0.01)\n process.wait()\n duration = time.time() - start\n if process.returncode != 0:\n raise AssertionError(f'Running failed:\\n{code}')\n return (duration, peak_memusage)", + "docstring": "Run code in a new Python process, and monitor peak memory usage. 
Returns ------- duration : float Duration in seconds (including Python startup time) peak_memusage : float Peak memory usage (rough estimate only) in bytes", + "type": "function", + "file_path": "scipy\\benchmarks\\benchmarks\\common.py", + "ast_data": "FunctionDef name:run_monitored_proc arg:code arguments arg If Call Raise Call Assign Call Assign Call Assign Assign Call While Assign Call If Compare With Call Assign Call Assign Call If Compare Assign Call Call Assign Call Call Call Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_adapted", + "source_code": "@property\ndef is_adapted(self):\n return self._is_adapted", + "docstring": "Whether the layer has been fit to data already.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py", + "ast_data": "FunctionDef name:is_adapted arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_LazyProtocol", + "source_code": "class _LazyProtocol(Protocol):\n\n def _register_load_state_dict_pre_hook(self, hook):\n ...\n\n def register_forward_pre_hook(self, hook, *, prepend=False, with_kwargs=False):\n ...\n\n def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n ...\n\n def _get_name(self):\n ...\n\n def _infer_parameters(self, module, input):\n ...\n\n @property\n def _parameters(self):\n ...\n\n @property\n def _buffers(self):\n ...\n\n @property\n def _non_persistent_buffers_set(self):\n ...\n\n @property\n def _load_hook(self):\n ...\n\n @property\n def _initialize_hook(self):\n ...", + "docstring": "This class is used to avoid errors with mypy checks for the attributes in a mixin.", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\lazy.py", + "ast_data": "ClassDef name:_LazyProtocol FunctionDef name:_register_load_state_dict_pre_hook arg:self arg:hook arguments arg arg FunctionDef name:register_forward_pre_hook arg:self arg:hook arguments arg arg arg arg FunctionDef name:_lazy_load_hook arg:self arg:state_dict arg:prefix arg:local_metadata arg:strict arg:missing_keys arg:unexpected_keys arg:error_msgs arguments arg arg arg arg arg arg arg arg FunctionDef name:_get_name arg:self arguments arg FunctionDef name:_infer_parameters arg:self arg:module arg:input arguments arg arg arg FunctionDef name:_parameters arg:self arguments arg FunctionDef name:_buffers arg:self arguments arg FunctionDef name:_non_persistent_buffers_set arg:self arguments arg FunctionDef name:_load_hook arg:self arguments arg FunctionDef name:_initialize_hook arg:self arguments arg" + }, + { + "library": "matplotlib", + "name": "set_alpha", + "source_code": "def set_alpha(self, alpha):\n artist.Artist._set_alpha_for_array(self, alpha)\n self._set_facecolor(self._original_facecolor)\n self._set_edgecolor(self._original_edgecolor)\n self._set_hatchcolor(self._original_hatchcolor)", + "docstring": "Set the transparency of the collection. Parameters ---------- alpha : float or array of float or None If not None, *alpha* values must be between 0 and 1, inclusive. If an array is provided, its length must match the number of elements in the collection. 
Masked values and nans are not supported.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:set_alpha arg:self arg:alpha arguments arg arg Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_lib", + "source_code": "@tf_export('sysconfig.get_lib')\ndef get_lib():\n import tensorflow as tf\n return _os_path.join(_os_path.dirname(tf.__file__))", + "docstring": "Get the directory containing the TensorFlow framework library. Returns: The directory as string.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\sysconfig.py", + "ast_data": "FunctionDef name:get_lib arguments Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "autoscale", + "source_code": "def autoscale(self, A):\n with self.callbacks.blocked():\n self.vmin = self.vmax = None\n self.autoscale_None(A)\n self._changed()", + "docstring": "Set *vmin*, *vmax* to min, max of *A*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:autoscale arg:self arg:A arguments arg arg With Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "serialized", + "source_code": "def serialized(self):\n if self._serialized is None:\n proto = topology_pb2.TopologyProto()\n proto.mesh_shape[:] = list(self._mesh_shape)\n proto.num_tasks = self._device_coordinates.shape[0]\n proto.num_tpu_devices_per_task = self._device_coordinates.shape[1]\n proto.device_coordinates.extend(list(self._device_coordinates.flatten()))\n self._serialized = proto.SerializeToString()\n return self._serialized", + "docstring": "Returns the serialized form of the topology.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py", + "ast_data": "FunctionDef name:serialized arg:self arguments arg If Compare Assign Call Assign Call Assign Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "__getstate__", + "source_code": "def __getstate__(self):\n state = super().__getstate__()\n state.pop('f_', None)\n return state", + "docstring": "Pickle-protocol - return state of the estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\isotonic.py", + "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "gather", + "source_code": "@_exception_logger\ndef gather(tensor: torch.Tensor, gather_list: Optional[list[torch.Tensor]]=None, dst: Optional[int]=None, group: Optional[ProcessGroup]=None, async_op: bool=False, group_dst: Optional[int]=None):\n _check_single_tensor(tensor, 'tensor')\n if gather_list:\n _check_tensor_list(gather_list, 'gather_list')\n else:\n gather_list = []\n _ensure_all_tensors_same_dtype(tensor, gather_list)\n group = _group_or_default_group(group)\n if _rank_not_in_group(group):\n _warn_not_in_group('gather')\n return\n if dst is None and group_dst is None:\n dst = 0\n group_dst = _canonicalize_group_rank(group, dst, group_dst, return_global=False)\n my_group_rank = group.rank()\n _validate_output_list_for_rank(my_group_rank, group_dst, gather_list)\n output_tensors = [gather_list] if group_dst == my_group_rank else []\n input_tensors = [tensor]\n opts = GatherOptions()\n opts.rootRank = group_dst\n opts.asyncOp = async_op\n work = group.gather(output_tensors, input_tensors, opts)\n if async_op:\n return work\n elif work is not None:\n work.wait()", + "docstring": 
"Gathers a list of tensors in a single process. This function requires all tensors to be the same size on each process. Args: tensor (Tensor): Input tensor. gather_list (list[Tensor], optional): List of appropriately, same-sized tensors to use for gathered data (default is None, must be specified on the destination rank) dst (int, optional): Destination rank on global process group (regardless of `` Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group .. note:: Note that all Tensors in gather_list must have the same size. Example:: >>> # xdoctest: +SKIP(\"no rank\") >>> # We have 2 process groups, 2 ranks. >>> tensor_size = 2 >>> device = torch.device(f'cuda:{rank}') >>> tensor = torch.ones(tensor_size, device=device) + rank >>> if dist.get_rank() == 0: >>> gather_list = [torch.zeros_like(tensor, device=device) for i in range(2)] >>> else: >>> gather_list = None >>> dist.gather(tensor, gather_list, dst=0) >>> # Rank 0 gets gathered data. >>> gather_list [tensor([1., 1.], device='cuda:0'), tensor([2., 2.], device='cuda:0')] # Rank 0 None # Rank 1", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:gather arg:tensor arg:gather_list arg:dst arg:group arg:async_op arg:group_dst arguments arg arg arg arg arg arg Call If Call Assign Call Assign Call If Call Call Return return:no If BoolOp Compare Compare Assign Assign Call Assign Call Call Assign Compare Assign Assign Call Assign Assign Assign Call If Return return:yes If Compare Call" + }, + { + "library": "pandas", + "name": "_wrap_transform_fast_result", + "source_code": "@final\ndef _wrap_transform_fast_result(self, result: NDFrameT) -> NDFrameT:\n obj = self._obj_with_exclusions\n ids = self._grouper.ids\n result = result.reindex(self._grouper.result_index, axis=0)\n if self.obj.ndim == 1:\n out = algorithms.take_nd(result._values, ids)\n output = obj._constructor(out, index=obj.index, name=obj.name)\n else:\n new_ax = result.index.take(ids)\n output = result._reindex_with_indexers({0: (new_ax, ids)}, allow_dups=True)\n output = output.set_axis(obj.index, axis=0)\n return output", + "docstring": "Fast transform path for aggregations.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:_wrap_transform_fast_result arg:self arg:result arguments arg arg Assign Assign Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "to_proto", + "source_code": "def to_proto(self) -> Any:\n return function_type_pb2.FunctionType(parameters=[p.to_proto() for p in self.parameters.values()], captures=[function_type_pb2.Capture(name=n, type_constraint=serialization.serialize(t)) for n, t in self.captures.items()])", + "docstring": "Generate a proto representation from the FunctionType.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", + "ast_data": "FunctionDef name:to_proto arg:self arguments arg Return return:yes Call Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "_findfile", + "source_code": "def _findfile(self, path):\n return DataSource._findfile(self, self._fullpath(path))", + "docstring": "Extend DataSource method to prepend baseurl to ``.", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_datasource.py", + "ast_data": "FunctionDef name:_findfile arg:self arg:path arguments arg arg Return 
return:yes Call Call" + }, + { + "library": "pytorch", + "name": "CosineSimilarity", + "source_code": "class CosineSimilarity(Module):\n __constants__ = ['dim', 'eps']\n dim: int\n eps: float\n\n def __init__(self, dim: int=1, eps: float=1e-08) -> None:\n super().__init__()\n self.dim = dim\n self.eps = eps\n\n def forward(self, x1: Tensor, x2: Tensor) -> Tensor:\n return F.cosine_similarity(x1, x2, self.dim, self.eps)", + "docstring": "Returns cosine similarity between :math: and :math:, computed along . .. math :: \\text{similarity} = \\dfrac{x_1 \\cdot x_2}{\\max(\\Vert x_1 \\Vert _2 \\cdot \\Vert x_2 \\Vert _2, \\epsilon)}. Args: dim (int, optional): Dimension where cosine similarity is computed. Default: 1 eps (float, optional): Small value to avoid division by zero. Default: 1e-8 Shape: - Input1: :math: where D is at position - Input2: :math:, same number of dimensions as x1, matching x1 size at dimension , and broadcastable with x1 at other dimensions. - Output: :math: Examples: >>> input1 = torch.randn(100, 128) >>> input2 = torch.randn(100, 128) >>> cos = nn.CosineSimilarity(dim=1, eps=1e-6) >>> output = cos(input1, input2)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\distance.py", + "ast_data": "ClassDef name:CosineSimilarity Assign FunctionDef name:__init__ arg:self arg:dim arg:eps arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:x1 arg:x2 arguments arg arg arg Return return:yes Call" + }, + { + "library": "virtualenv", + "name": "seeder", + "source_code": "@property\ndef seeder(self):\n return self._seeder", + "docstring": "The mechanism used to provide the seed packages (pip, setuptools, wheel).", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\run\\session.py", + "ast_data": "FunctionDef name:seeder arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "string", + "source_code": "@property\ndef string(self) -> str:\n return self._str", + "docstring": "The Stata representation of the missing value: '.', '.a'..'.z' Returns ------- str The representation of the missing value.", + "type": "method", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:string arg:self arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "Block3D", + "source_code": "class Block3D(Benchmark):\n params = [[1, 10, 100], ['block', 'copy']]\n param_names = ['n', 'mode']\n\n def setup(self, n, mode):\n self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1\n self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2\n self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3\n self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4\n self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5\n self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6\n self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7\n self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8\n self.block = [[[self.a000, self.a001], [self.a010, self.a011]], [[self.a100, self.a101], [self.a110, self.a111]]]\n self.arr_list = [a for two_d in self.block for one_d in two_d for a in one_d]\n\n def time_3d(self, n, mode):\n if mode == 'block':\n np.block(self.block)\n else:\n [arr.copy() for arr in self.arr_list]\n time_3d.benchmark_name = 'bench_shape_base.Block.time_3d'", + "docstring": "This benchmark concatenates an array of size ``", + "type": "class", + "file_path": "numpy\\benchmarks\\benchmarks\\bench_shape_base.py", + "ast_data": "ClassDef name:Block3D Assign Assign FunctionDef name:setup arg:self arg:n arg:mode arguments 
arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign FunctionDef name:time_3d arg:self arg:n arg:mode arguments arg arg arg If Compare Call Call Assign" + }, + { + "library": "matplotlib", + "name": "Scaled", + "source_code": "class Scaled(_Base):\n\n def __init__(self, scalable_size):\n self._scalable_size = scalable_size\n\n def get_size(self, renderer):\n rel_size = self._scalable_size\n abs_size = 0.0\n return (rel_size, abs_size)", + "docstring": "Simple scaled(?) size with absolute part = 0 and relative part = *scalable_size*.", + "type": "class", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py", + "ast_data": "ClassDef name:Scaled FunctionDef name:__init__ arg:self arg:scalable_size arguments arg arg Assign FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Assign Return return:yes" + }, + { + "library": "pandas", + "name": "sp_index", + "source_code": "@property\ndef sp_index(self) -> SparseIndex:\n return self._sparse_index", + "docstring": "The SparseIndex containing the location of non- `` points.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py", + "ast_data": "FunctionDef name:sp_index arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "restore_thread_local_summary_state", + "source_code": "def restore_thread_local_summary_state(self):\n summary_state = summary_ops_v2._summary_state\n summary_state.step = self._summary_step\n summary_state.writer = self._summary_writer\n summary_state.is_recording = self._summary_recording\n summary_state.is_recording_distribution_strategy = self._summary_recording_distribution_strategy", + "docstring": "Restore thread local summary state from self.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_run.py", + "ast_data": "FunctionDef name:restore_thread_local_summary_state arg:self arguments arg Assign Assign Assign Assign Assign" + }, + { + "library": "pandas", + "name": "mul", + "source_code": "def mul(self, other, level: Level | None=None, fill_value: float | None=None, axis: Axis=0) -> Series:\n return self._flex_method(other, operator.mul, level=level, fill_value=fill_value, axis=axis)", + "docstring": "Return Multiplication of series and other, element-wise (binary operator ). Equivalent to `Python documentation `_ for more details. 
Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=[\"a\", \"b\", \"c\", \"d\"]) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=[\"a\", \"b\", \"d\", \"e\"]) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.multiply(b, fill_value=0) a 1.0 b 0.0 c 0.0 d 0.0 e NaN dtype: float64 >>> a.mul(5, fill_value=0) a 5.0 b 5.0 c 5.0 d 0.0 dtype: float64", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:mul arg:self arg:other arg:level arg:fill_value arg:axis arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_or_create_assets_dir", + "source_code": "def get_or_create_assets_dir(export_dir):\n assets_destination_dir = get_assets_dir(export_dir)\n file_io.recursive_create_dir(assets_destination_dir)\n return assets_destination_dir", + "docstring": "Return assets sub-directory, or create one if it doesn't exist.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py", + "ast_data": "FunctionDef name:get_or_create_assets_dir arg:export_dir arguments arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__rdiv__", + "source_code": "def __rdiv__(self, other):\n raise TypeError(\"unsupported operand type(s) for /: '{}' and 'Dimension', please use // instead\".format(type(other).__name__))", + "docstring": "Use via instead. This function exists only to have a better error message. Instead of: , this function will explicitly call for usage of instead. Args: other: Another . Raises: TypeError.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:__rdiv__ arg:self arg:other arguments arg arg Raise Call Call Call" + }, + { + "library": "tensorflow", + "name": "_infer_frame_shape", + "source_code": "def _infer_frame_shape(signal, frame_length, frame_step, pad_end, axis):\n frame_length = tensor_util.constant_value(frame_length)\n frame_step = tensor_util.constant_value(frame_step)\n axis = tensor_util.constant_value(axis)\n if signal.shape.ndims is None:\n return None\n if axis is None:\n return [None] * (signal.shape.ndims + 1)\n signal_shape = signal.shape.as_list()\n num_frames = None\n frame_axis = signal_shape[axis]\n outer_dimensions = signal_shape[:axis]\n inner_dimensions = signal_shape[axis:][1:]\n if signal_shape and frame_axis is not None:\n if frame_step is not None and pad_end:\n num_frames = max(0, -(-frame_axis // frame_step))\n elif frame_step is not None and frame_length is not None:\n assert not pad_end\n num_frames = max(0, (frame_axis - frame_length + frame_step) // frame_step)\n return outer_dimensions + [num_frames, frame_length] + inner_dimensions", + "docstring": "Infers the shape of the return value of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\shape_ops.py", + "ast_data": "FunctionDef name:_infer_frame_shape arg:signal arg:frame_length arg:frame_step arg:pad_end arg:axis arguments arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Return return:no If Compare Return return:yes Assign Call Assign Assign Assign Assign If BoolOp Compare If BoolOp Compare Assign Call If BoolOp Compare Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "update_kwarg", + "source_code": "@compatibility(is_backward_compatible=True)\ndef update_kwarg(self, key: str, arg: Argument) -> None:\n self.kwargs = 
{**self.kwargs, key: arg}", + "docstring": "Update an existing keyword argument to contain the new value ``", + "type": "method", + "file_path": "pytorch\\torch\\fx\\node.py", + "ast_data": "FunctionDef name:update_kwarg arg:self arg:key arg:arg arguments arg arg arg Assign Call" + }, + { + "library": "numpy", + "name": "count", + "source_code": "def count(self, sub, start=0, end=None):\n return count(self, sub, start, end)", + "docstring": "Returns an array with the number of non-overlapping occurrences of substring in the range [, ]. See Also -------- char.count", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:count arg:self arg:sub arg:start arg:end arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "describe_categorical", + "source_code": "@property\ndef describe_categorical(self):\n if not self.dtype[0] == DtypeKind.CATEGORICAL:\n raise TypeError('describe_categorical only works on a column with categorical dtype!')\n return {'is_ordered': self._col.cat.ordered, 'is_dictionary': True, 'categories': PandasColumn(pd.Series(self._col.cat.categories))}", + "docstring": "If the dtype is categorical, there are two options: - There are only values in the data buffer. - There is a separate non-categorical Column encoding for categorical values. Raises TypeError if the dtype is not categorical Content of returned dict: - \"is_ordered\" : bool, whether the ordering of dictionary indices is semantically meaningful. - \"is_dictionary\" : bool, whether a dictionary-style mapping of categorical values to other objects exists - \"categories\" : Column representing the (implicit) mapping of indices to category values (e.g. an array of cat1, cat2, ...). None if not a dictionary-style categorical.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\column.py", + "ast_data": "FunctionDef name:describe_categorical arg:self arguments arg If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "find_bezier_t_intersecting_with_closedpath", + "source_code": "def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath, t0=0.0, t1=1.0, tolerance=0.01):\n start = bezier_point_at_t(t0)\n end = bezier_point_at_t(t1)\n start_inside = inside_closedpath(start)\n end_inside = inside_closedpath(end)\n if start_inside == end_inside and start != end:\n raise NonIntersectingPathException('Both points are on the same side of the closed path')\n while True:\n if np.hypot(start[0] - end[0], start[1] - end[1]) < tolerance:\n return (t0, t1)\n middle_t = 0.5 * (t0 + t1)\n middle = bezier_point_at_t(middle_t)\n middle_inside = inside_closedpath(middle)\n if start_inside ^ middle_inside:\n t1 = middle_t\n if end == middle:\n return (t0, t1)\n end = middle\n else:\n t0 = middle_t\n if start == middle:\n return (t0, t1)\n start = middle\n start_inside = middle_inside", + "docstring": "Find the intersection of the Bézier curve with a closed path. The intersection point *t* is approximated by two parameters *t0*, *t1* such that *t0* tuple[float, float] inside_closedpath : callable A function returning True if a given point (x, y) is inside the closed path. It must have the signature:: inside_closedpath(point: tuple[float, float]) -> bool t0, t1 : float Start parameters for the search. tolerance : float Maximal allowed distance between the final points. 
Returns ------- t0, t1 : float The Bézier path parameters.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\bezier.py", + "ast_data": "FunctionDef name:find_bezier_t_intersecting_with_closedpath arg:bezier_point_at_t arg:inside_closedpath arg:t0 arg:t1 arg:tolerance arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Raise Call While If Compare Call Return return:yes Assign Assign Call Assign Call If Assign If Compare Return return:yes Assign Assign If Compare Return return:yes Assign Assign" + }, + { + "library": "numpy", + "name": "get_flags_fix", + "source_code": "def get_flags_fix(self):\n return self._get_command_flags('compiler_fix')", + "docstring": "List of Fortran 90 fixed format specific flags.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py", + "ast_data": "FunctionDef name:get_flags_fix arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "module_name", + "source_code": "def module_name(self):\n if self.user_given_name:\n return self.user_given_name\n return self.__class__.__name__", + "docstring": "this is used to label the operator being benchmarked", + "type": "method", + "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py", + "ast_data": "FunctionDef name:module_name arg:self arguments arg If Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_replace_tensors_by_numpy_ndarrays", + "source_code": "def _replace_tensors_by_numpy_ndarrays(repr_ds_map: rd.RepresentativeDatasetMapping) -> None:\n with session.Session() as sess:\n for signature_def_key in repr_ds_map:\n ds = repr_ds_map[signature_def_key]\n repr_ds_map[signature_def_key] = rd.replace_tensors_by_numpy_ndarrays(ds, sess)", + "docstring": "Replaces tf.Tensors by their evaluated numpy arrays. This assumes that tf.Tensors in representative samples are created in the default Graph. It will raise an error if tensors are created in a different graph. Args: repr_ds_map: SignatureDef key -> RepresentativeDataset mapping.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py", + "ast_data": "FunctionDef name:_replace_tensors_by_numpy_ndarrays arg:repr_ds_map arguments arg With Call For Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "record_operation_backprop_only", + "source_code": "def record_operation_backprop_only(op_type, output_tensors, input_tensors, backward_function):\n pywrap_tfe.TFE_Py_TapeSetRecordOperationBackprop(op_type, output_tensors, input_tensors, backward_function)", + "docstring": "Records the operation on all backward tapes in the stack.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py", + "ast_data": "FunctionDef name:record_operation_backprop_only arg:op_type arg:output_tensors arg:input_tensors arg:backward_function arguments arg arg arg arg Call" + }, + { + "library": "pytorch", + "name": "synchronize", + "source_code": "def synchronize(device: _device_t=None) -> None:\n pass", + "docstring": "Waits for all kernels in all streams on the CPU device to complete. Args: device (torch.device or int, optional): ignored, there's only one CPU device. N.B. 
This function only exists to facilitate device-agnostic code.", + "type": "function", + "file_path": "pytorch\\torch\\cpu\\__init__.py", + "ast_data": "FunctionDef name:synchronize arg:device arguments arg" + }, + { + "library": "numpy", + "name": "diag", + "source_code": "def diag(v, k=0):\n output = np.diag(v, k).view(MaskedArray)\n if getmask(v) is not nomask:\n output._mask = np.diag(v._mask, k)\n return output", + "docstring": "Extract a diagonal or construct a diagonal array. This function is the equivalent of that takes masked values into account, see for details. See Also -------- numpy.diag : Equivalent function for ndarrays. Examples -------- >>> import numpy as np Create an array with negative values masked: >>> import numpy as np >>> x = np.array([[11.2, -3.973, 18], [0.801, -1.41, 12], [7, 33, -12]]) >>> masked_x = np.ma.masked_array(x, mask=x >> masked_x masked_array( data=[[11.2, --, 18.0], [0.801, --, 12.0], [7.0, 33.0, --]], mask=[[False, True, False], [False, True, False], [False, False, True]], fill_value=1e+20) Isolate the main diagonal from the masked array: >>> np.ma.diag(masked_x) masked_array(data=[11.2, --, --], mask=[False, True, True], fill_value=1e+20) Isolate the first diagonal below the main diagonal: >>> np.ma.diag(masked_x, -1) masked_array(data=[0.801, 33.0], mask=[False, False], fill_value=1e+20)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:diag arg:v arg:k arguments arg arg Assign Call Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "highlight_null", + "source_code": "@Substitution(subset=subset_args, props=properties_args, color=coloring_args.format(default='red'))\ndef highlight_null(self, color: str='red', subset: Subset | None=None, props: str | None=None) -> Styler:\n\n def f(data: DataFrame, props: str) -> np.ndarray:\n return np.where(pd.isna(data).to_numpy(), props, '')\n if props is None:\n props = f'background-color: {color};'\n return self.apply(f, axis=None, subset=subset, props=props)", + "docstring": "Highlight missing values with a style. Parameters ---------- %(color)s .. versionadded:: 1.5.0 %(subset)s %(props)s .. versionadded:: 1.3.0 Returns ------- Styler Instance of class where null values are highlighted with given style. See Also -------- Styler.highlight_max: Highlight the maximum with a style. Styler.highlight_min: Highlight the minimum with a style. Styler.highlight_between: Highlight a defined range with a style. Styler.highlight_quantile: Highlight values defined by a quantile with a style. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, np.nan]}) >>> df.style.highlight_null(color=\"yellow\") # doctest: +SKIP Please see: _ for more examples.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style.py", + "ast_data": "FunctionDef name:highlight_null arg:self arg:color arg:subset arg:props arguments arg arg arg arg FunctionDef name:f arg:data arg:props arguments arg arg Return return:yes Call Call Call If Compare Assign Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "arrays", + "source_code": "@property\ndef arrays(self) -> list[ArrayLike]:\n return [blk.values for blk in self.blocks]", + "docstring": "Quick access to the backing arrays of the Blocks. Only for compatibility with ArrayManager for testing convenience. Not to be used in actual code, and return value is not the same as the ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs). 
Warning! The returned arrays don't handle Copy-on-Write, so this should be used with caution (only in read-mode).", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:arrays arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "convert_points_to_homogeneous", + "source_code": "def convert_points_to_homogeneous(points: Tensor) -> Tensor:\n if not isinstance(points, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(points)}')\n if len(points.shape) < 2:\n raise ValueError(f'Input must be at least a 2D tensor. Got {points.shape}')\n return pad(points, [0, 1], 'constant', 1.0)", + "docstring": "Convert points from Euclidean to homogeneous space. Args: points: the points to be transformed with shape :math:. Returns: the points in homogeneous coordinates :math:. Examples: >>> input = tensor([[0., 0.]]) >>> convert_points_to_homogeneous(input) tensor([[0., 0., 1.]])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:convert_points_to_homogeneous arg:points arguments arg If Call Raise Call Call If Compare Call Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "tolist", + "source_code": "def tolist(self):\n _warn_typed_storage_removal()\n return list(self)", + "docstring": "Return a list containing the elements of this storage.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:tolist arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "DisableSharedObjectScope", + "source_code": "class DisableSharedObjectScope(object):\n\n def __enter__(self):\n SHARED_OBJECT_DISABLED.disabled = True\n self._orig_loading_scope = _shared_object_loading_scope()\n self._orig_saving_scope = _shared_object_saving_scope()\n\n def __exit__(self, *args, **kwargs):\n SHARED_OBJECT_DISABLED.disabled = False\n SHARED_OBJECT_LOADING.scope = self._orig_loading_scope\n SHARED_OBJECT_SAVING.scope = self._orig_saving_scope", + "docstring": "A context manager for disabling handling of shared objects. Disables shared object handling for both saving and loading. 
Created primarily for use with , which does extra surgery that is incompatible with shared objects.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py", + "ast_data": "ClassDef name:DisableSharedObjectScope FunctionDef name:__enter__ arg:self arguments arg Assign Assign Call Assign Call FunctionDef name:__exit__ arg:self arguments arg arg arg Assign Assign Assign" + }, + { + "library": "django", + "name": "has_changed", + "source_code": "def has_changed(self, initial, data):\n initial_value = self.to_python(initial)\n return super().has_changed(initial_value, data)", + "docstring": "Return True if data differs from initial.", + "type": "method", + "file_path": "django\\django\\contrib\\postgres\\forms\\hstore.py", + "ast_data": "FunctionDef name:has_changed arg:self arg:initial arg:data arguments arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_math_mode_with_parentheses", + "source_code": "def _math_mode_with_parentheses(s: str) -> str:\n s = s.replace('\\\\(', 'LEFT§=§6yzLEFT').replace('\\\\)', 'RIGHTab5§=§RIGHT')\n res = []\n for item in re.split('LEFT§=§6yz|ab5§=§RIGHT', s):\n if item.startswith('LEFT') and item.endswith('RIGHT'):\n res.append(item.replace('LEFT', '\\\\(').replace('RIGHT', '\\\\)'))\n elif 'LEFT' in item and 'RIGHT' in item:\n res.append(_escape_latex(item).replace('LEFT', '\\\\(').replace('RIGHT', '\\\\)'))\n else:\n res.append(_escape_latex(item).replace('LEFT', '\\\\textbackslash (').replace('RIGHT', '\\\\textbackslash )'))\n return ''.join(res)", + "docstring": "All characters in LaTeX math mode are preserved. The substrings in LaTeX math mode, which start with the character ``, are preserved without escaping. Otherwise regular LaTeX escaping applies. Parameters ---------- s : str Input to be escaped Return ------ str : Escaped string", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_math_mode_with_parentheses arg:s arguments arg Assign Call Call Assign For Call If BoolOp Call Call Call Call Call If BoolOp Compare Compare Call Call Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "estimate_bandwidth", + "source_code": "@validate_params({'X': ['array-like'], 'quantile': [Interval(Real, 0, 1, closed='both')], 'n_samples': [Interval(Integral, 1, None, closed='left'), None], 'random_state': ['random_state'], 'n_jobs': [Integral, None]}, prefer_skip_nested_validation=True)\ndef estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None):\n X = check_array(X)\n random_state = check_random_state(random_state)\n if n_samples is not None:\n idx = random_state.permutation(X.shape[0])[:n_samples]\n X = X[idx]\n n_neighbors = int(X.shape[0] * quantile)\n if n_neighbors < 1:\n n_neighbors = 1\n nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)\n nbrs.fit(X)\n bandwidth = 0.0\n for batch in gen_batches(len(X), 500):\n d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)\n bandwidth += np.max(d, axis=1).sum()\n return bandwidth / X.shape[0]", + "docstring": "Estimate the bandwidth to use with the mean-shift algorithm. This function takes time at least quadratic in . For large datasets, it is wise to subsample by setting . Alternatively, the parameter can be set to a small value without estimating it. Parameters ---------- X : array-like of shape (n_samples, n_features) Input points. 
quantile : float, default=0.3 Should be between [0, 1] 0.5 means that the median of all pairwise distances is used. n_samples : int, default=None The number of samples to use. If not given, all samples are used. random_state : int, RandomState instance, default=None The generator used to randomly select the samples from input points for bandwidth estimation. Use an int to make the randomness deterministic. See :term:. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. `joblib.parallel_backendGlossary ` for more details. Returns ------- bandwidth : float The bandwidth parameter. Examples -------- >>> import numpy as np >>> from sklearn.cluster import estimate_bandwidth >>> X = np.array([[1, 1], [2, 1], [1, 0], ... [4, 7], [3, 5], [3, 6]]) >>> estimate_bandwidth(X, quantile=0.5) np.float64(1.61)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\cluster\\_mean_shift.py", + "ast_data": "FunctionDef name:estimate_bandwidth arg:X arguments arg arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign Assign Call If Compare Assign Assign Call Call Assign For Call Call Assign Call Call Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "transform_get_item_tensor", + "source_code": "@register_transformation_rule(GetItemTensor)\ndef transform_get_item_tensor(constraint, counter):\n assert isinstance(constraint.index_tuple, tuple)\n dims, counter = gen_tensor_dims(constraint.tensor_size, counter)\n nat_constraints = gen_nat_constraints(dims)\n none_c = constraint.index_tuple.count(None)\n resulting_tensor_dims = (none_c + len(dims)) * [None]\n dim_index = 0\n for i in range(len(constraint.index_tuple)):\n if constraint.index_tuple[i] is None:\n resulting_tensor_dims[i] = 1\n elif constraint.index_tuple[i] == slice(None, None, None):\n pass\n else:\n raise NotImplementedError('Method not yet implemented')\n dim_index = 0\n for i in range(len(resulting_tensor_dims)):\n if resulting_tensor_dims[i] is None:\n resulting_tensor_dims[i] = dims[dim_index]\n dim_index += 1\n is_valid_index = valid_index_tensor(constraint.index_tuple, dims)\n if len(resulting_tensor_dims) > 4:\n return (F(), counter)\n else:\n constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq), BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq), *nat_constraints, is_valid_index]\n return (Conj(constraints), counter)", + "docstring": "When the index is a tuple, then the output will be a tensor TODO: we have to check if this is the case for all HF models The cases we are covering here are a tuple with one of: - slice with default argument - None None appends 1 to the input tensor dimensions so each occurrence of 'None' increases the rank by 1 slice with default arguments does not change the rank", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", + "ast_data": "FunctionDef name:transform_get_item_tensor arg:constraint arg:counter arguments arg arg Call Assign Call Assign Call Assign Call Assign Call Assign For Call Call If Compare Assign If Compare Call Raise Call Assign For Call Call If Compare Assign Assign Call If Compare Call Return return:yes Call Assign Call Call Call Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "predict_log_proba", + "source_code": "def predict_log_proba(self, X):\n return np.log(self.predict_proba(X))", + "docstring": "Predict logarithm of probability estimates. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- Y_log_prob : array-like of shape (n_samples, n_classes) The predicted logarithm of the probabilities.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multioutput.py", + "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "vectorstrength", + "source_code": "def vectorstrength(events, period):\n xp = array_namespace(events, period)\n events = xp.asarray(events)\n period = xp.asarray(period)\n if xp.isdtype(period.dtype, 'integral'):\n period = xp.astype(period, xp.float64)\n if events.ndim > 1:\n raise ValueError('events cannot have dimensions more than 1')\n if period.ndim > 1:\n raise ValueError('period cannot have dimensions more than 1')\n scalarperiod = not period.ndim\n events = xpx.atleast_nd(events, ndim=2, xp=xp)\n period = xpx.atleast_nd(period, ndim=2, xp=xp)\n if xp.any(period <= 0):\n raise ValueError('periods must be positive')\n events_ = xp.astype(events, period.dtype)\n vectors = xp.exp(2j * (xp.pi / period.T @ events_))\n vectormean = xp.mean(vectors, axis=1)\n strength = xp.abs(vectormean)\n phase = _angle(vectormean, xp)\n if scalarperiod:\n strength = strength[0]\n phase = phase[0]\n return (strength, phase)", + "docstring": "Determine the vector strength of the events corresponding to the given period. The vector strength is a measure of phase synchrony, how well the timing of the events is synchronized to a single period of a periodic signal. If multiple periods are used, calculate the vector strength of each. This is called the \"resonating vector strength\". Parameters ---------- events : 1D array_like An array of time points containing the timing of the events. period : float or array_like The period of the signal that the events should synchronize to. The period is in the same units as . It can also be an array of periods, in which case the outputs are arrays of the same length. Returns ------- strength : float or 1D array The strength of the synchronization. 1.0 is perfect synchronization and 0.0 is no synchronization. If is an array, this is also an array with each element containing the vector strength at the corresponding period. phase : float or array The phase that the events are most strongly synchronized to in radians. If is an array, this is also an array with each element containing the phase for the corresponding period. References ---------- van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector strength: Auditory system, electric fish, and noise. Chaos 21, 047508 (2011); :doi:. van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises: biological and mathematical perspectives. Biol Cybern. 2013 Aug;107(4):385-96. :doi:. van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens when we vary the \"probing\" frequency while keeping the spike times fixed. Biol Cybern. 2013 Aug;107(4):491-94. 
:doi:.", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_signaltools.py", + "ast_data": "FunctionDef name:vectorstrength arg:events arg:period arguments arg arg Assign Call Assign Call Assign Call If Call Assign Call If Compare Raise Call If Compare Raise Call Assign Assign Call Assign Call If Call Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call If Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_arrow_wedge", + "source_code": "def _get_arrow_wedge(self, x0, y0, x1, y1, head_dist, cos_t, sin_t, linewidth):\n dx, dy = (x0 - x1, y0 - y1)\n cp_distance = np.hypot(dx, dy)\n pad_projected = 0.5 * linewidth / sin_t\n if cp_distance == 0:\n cp_distance = 1\n ddx = pad_projected * dx / cp_distance\n ddy = pad_projected * dy / cp_distance\n dx = dx / cp_distance * head_dist\n dy = dy / cp_distance * head_dist\n dx1, dy1 = (cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy)\n dx2, dy2 = (cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy)\n vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1), (x1 + ddx, y1 + ddy), (x1 + ddx + dx2, y1 + ddy + dy2)]\n codes_arrow = [Path.MOVETO, Path.LINETO, Path.LINETO]\n return (vertices_arrow, codes_arrow, ddx, ddy)", + "docstring": "Return the paths for arrow heads. Since arrow lines are drawn with capstyle=projected, The arrow goes beyond the desired point. This method also returns the amount of the path to be shrunken so that it does not overshoot.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:_get_arrow_wedge arg:self arg:x0 arg:y0 arg:x1 arg:y1 arg:head_dist arg:cos_t arg:sin_t arg:linewidth arguments arg arg arg arg arg arg arg arg arg Assign Assign Call Assign If Compare Assign Assign Assign Assign Assign Assign Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_or_restore_slot_variable", + "source_code": "def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable):\n variable_key = _var_key(variable)\n slot_dict = self._slots.get(variable_key, {})\n slot_variable = slot_dict.get(slot_name, None)\n if slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() and (not ops.get_default_graph()._variable_creator_stack or self._distribution_strategy):\n initializer = trackable.CheckpointInitialValueCallable(checkpoint_position=slot_variable_position)\n slot_variable = self.add_slot(var=variable, initializer=initializer, slot_name=slot_name, shape=slot_variable_position.value_shape())\n if slot_variable is not None:\n slot_variable_position.restore(slot_variable)\n else:\n self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(variable_key, []).append(slot_variable_position)", + "docstring": "Restore a slot variable's value, possibly creating it. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. This method is nonetheless important when graph building for the case when a slot variable has already been created but has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A object indicating the slot variable object to be restored. 
slot_name: The name of this 's slot to restore into. variable: The variable object this slot is being created for.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:_create_or_restore_slot_variable arg:self arg:slot_variable_position arg:slot_name arg:variable arguments arg arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Call Call BoolOp Call Assign Call Assign Call Call If Compare Call Call Call Call" + }, + { + "library": "sphinx", + "name": "_relative_path", + "source_code": "def _relative_path(path: Path, root: Path, /) -> Path:\n if '..' in path.parts:\n path = path.resolve()\n if '..' in root.parts:\n root = root.resolve()\n if path.anchor != root.anchor or '..' in root.parts:\n return path\n if sys.version_info[:2] < (3, 12):\n return Path(os.path.relpath(path, root))\n return path.relative_to(root, walk_up=True)", + "docstring": "Return a relative filepath to *path* from the given *root* directory. This is an alternative of ``. It returns the original path if *path* and *root* are on different drives, which may happen on Windows.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\osutil.py", + "ast_data": "FunctionDef name:_relative_path arguments arg arg If Compare Assign Call If Compare Assign Call If BoolOp Compare Compare Return return:yes If Compare Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "SavedModelSaver", + "source_code": "class SavedModelSaver(object, metaclass=abc.ABCMeta):\n\n def __init__(self, obj):\n self.obj = obj\n\n @abc.abstractproperty\n def object_identifier(self):\n raise NotImplementedError\n\n @property\n def tracking_metadata(self):\n return json_utils.Encoder().encode(self.python_properties)\n\n def trackable_children(self, serialization_cache):\n if not utils.should_save_traces():\n return {}\n children = self.objects_to_serialize(serialization_cache)\n children.update(self.functions_to_serialize(serialization_cache))\n return children\n\n @abc.abstractproperty\n def python_properties(self):\n raise NotImplementedError\n\n @abc.abstractmethod\n def objects_to_serialize(self, serialization_cache):\n raise NotImplementedError\n\n @abc.abstractmethod\n def functions_to_serialize(self, serialization_cache):\n raise NotImplementedError", + "docstring": "Saver defining the methods and properties used to serialize Keras objects.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py", + "ast_data": "ClassDef name:SavedModelSaver FunctionDef name:__init__ arg:self arg:obj arguments arg arg Assign FunctionDef name:object_identifier arg:self arguments arg Raise FunctionDef name:tracking_metadata arg:self arguments arg Return return:yes Call Call FunctionDef name:trackable_children arg:self arg:serialization_cache arguments arg arg If Call Return return:no Assign Call Call Call Return return:yes FunctionDef name:python_properties arg:self arguments arg Raise FunctionDef name:objects_to_serialize arg:self arg:serialization_cache arguments arg arg Raise FunctionDef name:functions_to_serialize arg:self arg:serialization_cache arguments arg arg Raise" + }, + { + "library": "pytorch", + "name": "acquire", + "source_code": "@abc.abstractmethod\ndef acquire(self, scope_id: str, expiration_time: float) -> None:\n pass", + "docstring": "Acquires a timer for the worker that holds this client object given the scope_id and expiration_time. 
Typically registers the timer with the TimerServer.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py", + "ast_data": "FunctionDef name:acquire arg:self arg:scope_id arg:expiration_time arguments arg arg arg" + }, + { + "library": "scikit-learn", + "name": "fit_predict", + "source_code": "@property\ndef fit_predict(self):\n raise AttributeError", + "docstring": "Fit and return the result of each sample's clustering assignment.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_agglomerative.py", + "ast_data": "FunctionDef name:fit_predict arg:self arguments arg Raise" + }, + { + "library": "pygame", + "name": "__repr__", + "source_code": "def __repr__(self):\n return '<{klass} @{id:x} {attrs}>'.format(klass=self.__class__.__name__, id=id(self) & 16777215, attrs=' '.join((f'{k}={v!r}' for k, v in self.__dict__.items())))", + "docstring": "Turn the class into a string.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "pandas", + "name": "pyarrow_array_to_numpy_and_mask", + "source_code": "def pyarrow_array_to_numpy_and_mask(arr, dtype: np.dtype) -> tuple[np.ndarray, np.ndarray]:\n dtype = np.dtype(dtype)\n if pyarrow.types.is_null(arr.type):\n data = np.empty(len(arr), dtype=dtype)\n mask = np.zeros(len(arr), dtype=bool)\n return (data, mask)\n buflist = arr.buffers()\n offset = arr.offset * dtype.itemsize\n length = len(arr) * dtype.itemsize\n data_buf = buflist[1][offset:offset + length]\n data = np.frombuffer(data_buf, dtype=dtype)\n bitmask = buflist[0]\n if bitmask is not None:\n mask = pyarrow.BooleanArray.from_buffers(pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset)\n mask = np.asarray(mask)\n else:\n mask = np.ones(len(arr), dtype=bool)\n return (data, mask)", + "docstring": "Convert a primitive pyarrow.Array to a numpy array and boolean mask based on the buffers of the Array. At the moment pyarrow.BooleanArray is not supported. Parameters ---------- arr : pyarrow.Array dtype : numpy.dtype Returns ------- (data, mask) Tuple of two numpy arrays with the raw data (with specified dtype) and a boolean mask (validity mask, so False means missing)", + "type": "function", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\_arrow_utils.py", + "ast_data": "FunctionDef name:pyarrow_array_to_numpy_and_mask arg:arr arg:dtype arguments arg arg Assign Call If Call Assign Call Call Assign Call Call Return return:yes Assign Call Assign Assign Call Assign Assign Call Assign If Compare Assign Call Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "run_once", + "source_code": "def run_once(self, *args, **kwargs):\n return self._benchmark.run_once(*args, **kwargs)", + "docstring": "Given input id (input_idx) run benchmark once and return prediction. This is useful for testing that benchmark actually runs the module you want it to run. 
input_idx here is an index into inputs array populated by calling add_input() method.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\throughput_benchmark.py", + "ast_data": "FunctionDef name:run_once arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_original_model_type", + "source_code": "def _get_original_model_type(self):\n model_type = TFLiteConverterBase._original_model_type\n TFLiteConverterBase._original_model_type = conversion_metadata_fb.ModelType.NONE\n return model_type", + "docstring": "One-time getter to return original model type and set it to NONE.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:_get_original_model_type arg:self arguments arg Assign Assign Return return:yes" + }, + { + "library": "numpy", + "name": "legadd", + "source_code": "def legadd(c1, c2):\n return pu._add(c1, c2)", + "docstring": "Add one Legendre series to another. Returns the sum of two Legendre series + . The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Legendre series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Legendre series of their sum. See Also -------- legsub, legmulx, legmul, legdiv, legpow Notes ----- Unlike multiplication, division, etc., the sum of two Legendre series is a Legendre series (without having to \"reproject\" the result onto the basis set) so addition, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial import legendre as L >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> L.legadd(c1,c2) array([4., 4., 4.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:legadd arg:c1 arg:c2 arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "tf2ss", + "source_code": "def tf2ss(num, den):\n num, den = normalize(num, den)\n nn = len(num.shape)\n if nn == 1:\n num = asarray([num], num.dtype)\n M = num.shape[1]\n K = len(den)\n if M > K:\n msg = 'Improper transfer function. `num` is longer than `den`.'\n raise ValueError(msg)\n if M == 0 or K == 0:\n return (array([], float), array([], float), array([], float), array([], float))\n num = np.hstack((np.zeros((num.shape[0], K - M), dtype=num.dtype), num))\n if num.shape[-1] > 0:\n D = atleast_2d(num[:, 0])\n else:\n D = array([[0]], float)\n if K == 1:\n D = D.reshape(num.shape)\n return (zeros((1, 1)), zeros((1, D.shape[1])), zeros((D.shape[0], 1)), D)\n frow = -array([den[1:]])\n A = r_[frow, eye(K - 2, K - 1)]\n B = eye(K - 1, 1)\n C = num[:, 1:] - outer(num[:, 0], den[1:])\n D = D.reshape((C.shape[0], B.shape[1]))\n return (A, B, C, D)", + "docstring": "Transfer function to state-space representation. Parameters ---------- num, den : array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of descending degree. The denominator needs to be at least as long as the numerator. Returns ------- A, B, C, D : ndarray State space representation of the system, in controller canonical form. Examples -------- Convert the transfer function: .. math:: H(s) = \\frac{s^2 + 3s + 3}{s^2 + 2s + 1} >>> num = [1, 3, 3] >>> den = [1, 2, 1] to the state-space representation: .. 
math:: \\dot{\\textbf{x}}(t) = \\begin{bmatrix} -2 & -1 \\\\ 1 & 0 \\end{bmatrix} \\textbf{x}(t) + \\begin{bmatrix} 1 \\\\ 0 \\end{bmatrix} \\textbf{u}(t) \\\\ \\textbf{y}(t) = \\begin{bmatrix} 1 & 2 \\end{bmatrix} \\textbf{x}(t) + \\begin{bmatrix} 1 \\end{bmatrix} \\textbf{u}(t) >>> from scipy.signal import tf2ss >>> A, B, C, D = tf2ss(num, den) >>> A array([[-2., -1.], [ 1., 0.]]) >>> B array([[ 1.], [ 0.]]) >>> C array([[ 1., 2.]]) >>> D array([[ 1.]])", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_lti_conversion.py", + "ast_data": "FunctionDef name:tf2ss arg:num arg:den arguments arg arg Assign Call Assign Call If Compare Assign Call Assign Assign Call If Compare Assign Raise Call If BoolOp Compare Compare Return return:yes Call Call Call Call Assign Call Call If Compare Assign Call Assign Call If Compare Assign Call Return return:yes Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, debug_dump, config):\n self._debug_dump = debug_dump\n self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump)\n self._tensor_filters = {}\n self._build_argument_parsers(config)\n config.set_callback('graph_recursion_depth', self._build_argument_parsers)", + "docstring": "DebugAnalyzer constructor. Args: debug_dump: A DebugDumpDir object. config: A object that carries user-facing configurations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:debug_dump arg:config arguments arg arg arg Assign Assign Call Assign Call Call" + }, + { + "library": "scikit-learn", + "name": "is_pandas_na", + "source_code": "def is_pandas_na(x):\n with suppress(ImportError):\n from pandas import NA\n return x is NA\n return False", + "docstring": "Test if x is pandas.NA. We intentionally do not use this function to return for in , because estimators that support are the exception rather than the rule at the moment. When is more universally supported, we may reconsider this decision. Parameters ---------- x : any type Returns ------- boolean", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_missing.py", + "ast_data": "FunctionDef name:is_pandas_na arg:x arguments arg With Call Return return:yes Compare Return return:yes" + }, + { + "library": "scipy", + "name": "_funm_multiply_krylov_lanczos", + "source_code": "def _funm_multiply_krylov_lanczos(A, b, bnorm, V, H, m):\n dotprod = np.vdot if np.iscomplexobj(b) else np.dot\n norm_tol = np.finfo(b.dtype.char).eps ** 2\n V[:, 0] = b / bnorm\n for k in range(0, m):\n if k > 0:\n V[:, k + 1] = A.dot(V[:, k]) - H[k, k - 1] * V[:, k - 1]\n else:\n V[:, k + 1] = A.dot(V[:, k])\n H[k, k] = dotprod(V[:, k + 1], V[:, k])\n V[:, k + 1] = V[:, k + 1] - H[k, k] * V[:, k]\n H[k + 1, k] = norm(V[:, k + 1])\n if H[k + 1, k] < norm_tol:\n return (True, k)\n V[:, k + 1] = V[:, k + 1] / H[k + 1, k]\n if k < m - 1:\n H[k, k + 1] = H[k + 1, k]\n return (False, m)", + "docstring": "The Lanczos iteration for constructing the basis V and the projection H = V * A V for the Krylov subspace Km(A, b) of order m. A must be Hermitian. Parameters ---------- A : transposable linear operator The operator whose matrix function is of interest. b : ndarray The vector b to multiply the f(A) with. V : ndarray The n x (m + 1) matrix whose columns determines the basis for Krylov subspace Km(A, b). 
H : ndarray A (m + 1) x m upper Hessenberg matrix representing the projection of A onto Km(A, b). m : int The order of the Krylov subspace. Returns ------- breakdown : bool Indicate if the Arnoldi broke down or not iter : int Returns the last valid iteration.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_funm_multiply_krylov.py", + "ast_data": "FunctionDef name:_funm_multiply_krylov_lanczos arg:A arg:b arg:bnorm arg:V arg:H arg:m arguments arg arg arg arg arg arg Assign Call Assign Call Assign For Call If Compare Assign Call Assign Call Assign Call Assign Assign Call If Compare Return return:yes Assign If Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "float8_e4m3fn", + "source_code": "def float8_e4m3fn(self):\n _warn_typed_storage_removal()\n return self._to(torch.float8_e4m3fn)", + "docstring": "Casts this storage to float8_e4m3fn type", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:float8_e4m3fn arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "module_class_name", + "source_code": "@property\ndef module_class_name(self) -> str:\n if self._module_class is None:\n return ''\n if isinstance(self._module_class, type):\n return self._module_class.__name__\n return self._module_class", + "docstring": "Name of the module class. E.g. .", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", + "ast_data": "FunctionDef name:module_class_name arg:self arguments arg If Compare Return return:yes If Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "fuzzy_list_to_dict", + "source_code": "def fuzzy_list_to_dict(items: list[tuple[str, str]]) -> dict[str, list[str]]:\n rc: dict[str, list[str]] = defaultdict(list)\n for key, val in items:\n rc[key].append(val)\n return dict(rc)", + "docstring": "Converts list to dict preserving elements with duplicate keys", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\gitutils.py", + "ast_data": "FunctionDef name:fuzzy_list_to_dict arg:items arguments arg Call For Call Return return:yes Call" + }, + { + "library": "django", + "name": "unpack", + "source_code": "def unpack(structure, data):\n return struct.unpack('<' + structure, bytes.fromhex(data))", + "docstring": "Unpack little endian hexlified binary string into a list.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\pgraster.py", + "ast_data": "FunctionDef name:unpack arg:structure arg:data arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "StrictMinMaxConstraint", + "source_code": "@dataclass(frozen=True)\nclass StrictMinMaxConstraint(Constraint):\n vr: ValueRanges\n\n def render(self, source: Source) -> str:\n return f'{self.vr.lower} <= {source.name()} <= {self.vr.upper}'", + "docstring": "For clients: the size at this dimension must be within 'vr' (which specifies a lower and upper bound, inclusive-inclusive) AND it must be non-negative and should not be 0 or 1 (but see NB below). For backends: there must not be any guards on this dimension which are not implied by the given lower and upper bound. Regardless of the lower bound, the backend can assume the size is non-negative and that it is not 0 or 1. An unbounded StrictMinMaxConstraint can be thought of as a strict version of \"RelaxedUnspecConstraint\". 
NB: Export will often unsoundly assume that a graph works for 0/1, even though at trace time we assumed size is not 0 or 1. The idea is that if we produce a graph that works for a range of values, it will be OK for N=0/1 too.", + "type": "class", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "ClassDef name:StrictMinMaxConstraint FunctionDef name:render arg:self arg:source arguments arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n raw_predictions = self.decision_function(X)\n if raw_predictions.ndim == 1:\n encoded_classes = (raw_predictions >= 0).astype(int)\n else:\n encoded_classes = np.argmax(raw_predictions, axis=1)\n return self.classes_[encoded_classes]", + "docstring": "Predict class for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. Returns ------- y : ndarray of shape (n_samples,) The predicted values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call If Compare Assign Call Compare Assign Call Return return:yes" + }, + { + "library": "django", + "name": "set_rollback", + "source_code": "def set_rollback(rollback, using=None):\n return get_connection(using).set_rollback(rollback)", + "docstring": "Set or unset the \"needs rollback\" flag -- for *advanced use* only. When is , trigger a rollback when exiting the innermost enclosing atomic block that has (that's the default). Use this to force a rollback without raising an exception. When is , prevent such a rollback. Use this only after rolling back to a known-good state! Otherwise, you break the atomic block and data corruption may occur.", + "type": "function", + "file_path": "django\\django\\db\\transaction.py", + "ast_data": "FunctionDef name:set_rollback arg:rollback arg:using arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_custom_objects_by_name", + "source_code": "def get_custom_objects_by_name(item, custom_objects=None):\n if item in _GLOBAL_CUSTOM_OBJECTS:\n return _GLOBAL_CUSTOM_OBJECTS[item]\n elif custom_objects and item in custom_objects:\n return custom_objects[item]\n return None", + "docstring": "Returns the item if it is in either local or global custom objects.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py", + "ast_data": "FunctionDef name:get_custom_objects_by_name arg:item arg:custom_objects arguments arg arg If Compare Return return:yes If BoolOp Compare Return return:yes Return return:no" + }, + { + "library": "pytorch", + "name": "disable_fake_quant", + "source_code": "def disable_fake_quant(mod):\n if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):\n mod.disable_fake_quant()", + "docstring": "Disable fake quantization for the module. Disable fake quantization for this module, if applicable. 
Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.disable_fake_quant)", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py", + "ast_data": "FunctionDef name:disable_fake_quant arg:mod arguments arg If BoolOp Call Call Call" + }, + { + "library": "scipy", + "name": "put_variables", + "source_code": "def put_variables(self, mdict, write_header=None):\n self._matrix_writer = VarWriter4(self)\n for name, var in mdict.items():\n self._matrix_writer.write(var, name)", + "docstring": "Write variables in to stream Parameters ---------- mdict : mapping mapping with method `` is something writeable to a matlab file, such as a NumPy array. write_header : {None, True, False} If True, then write the matlab file header before writing the variables. If None (the default) then write the file header if we are at position 0 in the stream. By setting False here, and setting the stream position to the end of the file, you can append variables to a matlab file", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", + "ast_data": "FunctionDef name:put_variables arg:self arg:mdict arg:write_header arguments arg arg arg Assign Call For Call Call" + }, + { + "library": "tensorflow", + "name": "get_c_function", + "source_code": "def get_c_function(name):\n return context().get_c_function(name)", + "docstring": "Get a C API TF_Function from the context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:get_c_function arg:name arguments arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "set_fill_value", + "source_code": "def set_fill_value(a, fill_value):\n if isinstance(a, MaskedArray):\n a.set_fill_value(fill_value)", + "docstring": "Set the filling value of a, if a is a masked array. This function changes the fill value of the masked array in place. If is not a masked array, the function returns silently, without doing anything. Parameters ---------- a : array_like Input array. fill_value : dtype Filling value. A consistency test is performed to make sure the value is compatible with the dtype of . Returns ------- None Nothing returned by this function. See Also -------- maximum_fill_value : Return the default fill value for a dtype. MaskedArray.fill_value : Return current fill value. MaskedArray.set_fill_value : Equivalent method. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(5) >>> a array([0, 1, 2, 3, 4]) >>> a = ma.masked_where(a >> a masked_array(data=[--, --, --, 3, 4], mask=[ True, True, True, False, False], fill_value=999999) >>> ma.set_fill_value(a, -999) >>> a masked_array(data=[--, --, --, 3, 4], mask=[ True, True, True, False, False], fill_value=-999) Nothing happens if is not a masked array. 
>>> a = list(range(5)) >>> a [0, 1, 2, 3, 4] >>> ma.set_fill_value(a, 100) >>> a [0, 1, 2, 3, 4] >>> a = np.arange(5) >>> a array([0, 1, 2, 3, 4]) >>> ma.set_fill_value(a, 100) >>> a array([0, 1, 2, 3, 4])", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:set_fill_value arg:a arg:fill_value arguments arg arg If Call Call" + }, + { + "library": "kornia", + "name": "get_affine_matrix2d", + "source_code": "def get_affine_matrix2d(translations: Tensor, center: Tensor, scale: Tensor, angle: Tensor, sx: Optional[Tensor]=None, sy: Optional[Tensor]=None) -> Tensor:\n transform: Tensor = get_rotation_matrix2d(center, -angle, scale)\n transform[..., 2] += translations\n transform_h = convert_affinematrix_to_homography(transform)\n if any((s is not None for s in [sx, sy])):\n shear_mat = get_shear_matrix2d(center, sx, sy)\n transform_h = transform_h @ shear_mat\n return transform_h", + "docstring": "Compose affine matrix from the components. Args: translations: tensor containing the translation vector with shape :math:. center: tensor containing the center vector with shape :math:. scale: tensor containing the scale factor with shape :math:. angle: tensor of angles in degrees :math:. sx: tensor containing the shear factor in the x-direction with shape :math:. sy: tensor containing the shear factor in the y-direction with shape :math:. Returns: the affine transformation matrix :math:. .. note:: This function is often used in conjunction with :func:, :func:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py", + "ast_data": "FunctionDef name:get_affine_matrix2d arg:translations arg:center arg:scale arg:angle arg:sx arg:sy arguments arg arg arg arg arg arg Call Assign Call If Call Compare Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "current_device", + "source_code": "def current_device() -> int:\n _lazy_init()\n return torch._C._xpu_getDevice()", + "docstring": "Return the index of a currently selected device.", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\__init__.py", + "ast_data": "FunctionDef name:current_device arguments Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "reset", + "source_code": "def reset(self) -> None:\n self._mean.reset()", + "docstring": "Reset the state captured by calls.", + "type": "method", + "file_path": "pandas\\pandas\\core\\window\\ewm.py", + "ast_data": "FunctionDef name:reset arg:self arguments arg Call" + }, + { + "library": "pandas", + "name": "_gen_rows_without_counts", + "source_code": "def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:\n yield from zip(self._gen_line_numbers(), self._gen_columns(), self._gen_dtypes())", + "docstring": "Iterator with string representation of body data without counts.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:_gen_rows_without_counts arg:self arguments arg Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "experimental_distribute_dataset", + "source_code": "def experimental_distribute_dataset(self, dataset, options=None):\n if options and options.experimental_replication_moden == distribute_lib.InputReplicationMode.PER_REPLICA:\n raise NotImplementedError('InputReplicationMode.PER_REPLICA is only supported in `experimental_distribute_datasets_from_function`.')\n return super(CentralStorageStrategy, self).experimental_distribute_dataset(dataset, options)", + "docstring": 
"Distributes a tf.data.Dataset instance provided via dataset. The returned dataset is a wrapped strategy dataset which creates a multidevice iterator under the hood. It prefetches the input data to the specified devices on the worker. The returned distributed dataset can be iterated over similar to how regular datasets can. NOTE: Currently, the user cannot add any more transformations to a distributed dataset. For Example: Args: dataset: to be prefetched to device. options: used to control options on how this dataset is distributed. Returns: A \"distributed \" that the caller can iterate over.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\central_storage_strategy.py", + "ast_data": "FunctionDef name:experimental_distribute_dataset arg:self arg:dataset arg:options arguments arg arg arg If BoolOp Compare Raise Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "info", + "source_code": "def info(self) -> PipeInfo:\n return PipeInfo(graph=self.split_gm.graph, num_stages=self.num_stages, has_loss_and_backward=self.has_loss_and_backward)", + "docstring": "Get information about the pipe. Returns ------- PipeInfo A dataclass containing information about the pipe.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py", + "ast_data": "FunctionDef name:info arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "logical_op", + "source_code": "def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike:\n\n def fill_bool(x, left=None):\n if x.dtype.kind in 'cfO':\n mask = isna(x)\n if mask.any():\n x = x.astype(object)\n x[mask] = False\n if left is None or left.dtype.kind == 'b':\n x = x.astype(bool)\n return x\n right = lib.item_from_zerodim(right)\n if is_list_like(right) and (not hasattr(right, 'dtype')):\n raise TypeError('Logical ops (and, or, xor) between Pandas objects and dtype-less sequences (e.g. list, tuple) are no longer supported. Wrap the object in a Series, Index, or np.array before operating instead.')\n lvalues = ensure_wrapped_if_datetimelike(left)\n rvalues = right\n if should_extension_dispatch(lvalues, rvalues):\n res_values = op(lvalues, rvalues)\n else:\n if isinstance(rvalues, np.ndarray):\n is_other_int_dtype = rvalues.dtype.kind in 'iu'\n if not is_other_int_dtype:\n rvalues = fill_bool(rvalues, lvalues)\n else:\n is_other_int_dtype = lib.is_integer(rvalues)\n res_values = na_logical_op(lvalues, rvalues, op)\n if not (left.dtype.kind in 'iu' and is_other_int_dtype):\n res_values = fill_bool(res_values)\n return res_values", + "docstring": "Evaluate a logical operation , , or . Parameters ---------- left : np.ndarray or ExtensionArray right : object Cannot be a DataFrame, Series, or Index. op : {operator.and_, operator.or_, operator.xor} Or one of the reversed variants from roperator. 
Returns ------- ndarray or ExtensionArray", + "type": "function", + "file_path": "pandas\\pandas\\core\\ops\\array_ops.py", + "ast_data": "FunctionDef name:logical_op arg:left arg:right arg:op arguments arg arg arg FunctionDef name:fill_bool arg:x arg:left arguments arg arg If Compare Assign Call If Call Assign Call Assign If BoolOp Compare Compare Assign Call Return return:yes Assign Call If BoolOp Call Call Raise Call Assign Call Assign If Call Assign Call If Call Assign Compare If Assign Call Assign Call Assign Call If BoolOp Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_if_scalar_type_as", + "source_code": "def _if_scalar_type_as(self, tensor):\n if isinstance(self, _C.Value):\n return self\n scalar_type = _type_utils.JitScalarType.from_value(tensor, _type_utils.JitScalarType.UNDEFINED)\n if scalar_type != _type_utils.JitScalarType.UNDEFINED:\n ty = scalar_type.scalar_name().lower()\n return getattr(self, ty)()\n return self", + "docstring": "Convert self into the same type of tensor, as necessary. We only support implicit casting for scalars, so we never actually need to insert an ONNX cast operator here; just fix up the scalar.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py", + "ast_data": "FunctionDef name:_if_scalar_type_as arg:self arg:tensor arguments arg arg If Call Return return:yes Assign Call If Compare Assign Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "SymbolTable", + "source_code": "class SymbolTable(object):\n\n def __init__(self):\n self.symbols = []\n self.enter_scope()\n self.scf_scope = 0\n self.insert_symbol('len', 'len', TFRTypes.PY_BUILTIN_FUNC)\n\n def enter_scope(self, scf_scope=False):\n self.symbols.append({'types': {}, 'symbols': {}})\n self.curr_table = self.symbols[len(self.symbols) - 1]\n if scf_scope:\n self.scf_scope += 1\n\n def insert_symbol(self, name, value, type_):\n self.curr_table['symbols'][name] = (value, type_)\n self.curr_table['types'][name] = type_\n return value\n\n def exit_scope(self):\n self.symbols.pop()\n self.curr_table = self.symbols[len(self.symbols) - 1]\n if self.scf_scope > 0:\n self.scf_scope -= 1\n\n def in_scf_scope(self):\n return self.scf_scope > 0\n\n def lookup(self, name):\n curr_idx = len(self.symbols) - 1\n while curr_idx >= 0 and name not in self.symbols[curr_idx]['symbols']:\n curr_idx -= 1\n if curr_idx < 0:\n return None\n return self.symbols[curr_idx]['symbols'][name]", + "docstring": "Symbol Table for python code.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py", + "ast_data": "ClassDef name:SymbolTable FunctionDef name:__init__ arg:self arguments arg Assign Call Assign Call FunctionDef name:enter_scope arg:self arg:scf_scope arguments arg arg Call Assign Call If FunctionDef name:insert_symbol arg:self arg:name arg:value arg:type_ arguments arg arg arg arg Assign Assign Return return:yes FunctionDef name:exit_scope arg:self arguments arg Call Assign Call If Compare FunctionDef name:in_scf_scope arg:self arguments arg Return return:yes Compare FunctionDef name:lookup arg:self arg:name arguments arg arg Assign Call While BoolOp Compare Compare If Compare Return return:no Return return:yes" + }, + { + "library": "tensorflow", + "name": "_initialize", + "source_code": "def _initialize(self, args, kwds, add_initializers_to=None):\n created_variables = []\n\n def variable_capturing_scope(next_creator, **kwds):\n enable_variable_lifting = 
kwds.get('experimental_enable_variable_lifting')\n if enable_variable_lifting is None:\n enable_variable_lifting = True\n if not enable_variable_lifting:\n return next_creator(**kwds)\n v = UnliftedInitializerVariable(add_initializers_to=add_initializers_to, **kwds)\n created_variables.append(weakref.ref(v))\n return v\n self._created_variables = created_variables\n self._variable_creation_config = self._generate_scoped_tracing_options(variable_capturing_scope, tracing_compilation.ScopeType.VARIABLE_CREATION)\n self._concrete_variable_creation_fn = tracing_compilation.trace_function(args, kwds, self._variable_creation_config)\n\n def invalid_creator_scope(*unused_args, **unused_kwds):\n raise ValueError('tf.function only supports singleton tf.Variables created on the first call. Make sure the tf.Variable is only created once or created outside tf.function. See https://www.tensorflow.org/guide/function#creating_tfvariables for more information.')\n self._no_variable_creation_config = self._generate_scoped_tracing_options(invalid_creator_scope, tracing_compilation.ScopeType.NO_VARIABLE_CREATION)", + "docstring": "Initializes, on the first call. Creates two s, one that will allow creation of variables and one that won't. Additionally runs a trace for the that allows creation of variables. Args: args: Arguments to the underlying python callable. kwds: Keyword arguments to the python callable. add_initializers_to: Where to collect variable initializers, if not None.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", + "ast_data": "FunctionDef name:_initialize arg:self arg:args arg:kwds arg:add_initializers_to arguments arg arg arg arg Assign FunctionDef name:variable_capturing_scope arg:next_creator arguments arg arg Assign Call If Compare Assign If Return return:yes Call Assign Call Call Call Return return:yes Assign Assign Call Assign Call FunctionDef name:invalid_creator_scope arguments arg arg Raise Call Assign Call" + }, + { + "library": "tensorflow", + "name": "cast", + "source_code": "def cast(i, p):\n return i * p + (1 - i) * (eye - p)", + "docstring": "Return p or (1-p).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:cast arg:i arg:p arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_profile_data_generator", + "source_code": "def _get_profile_data_generator(self):\n node_to_file_path = {}\n node_to_line_number = {}\n node_to_func_name = {}\n node_to_op_type = {}\n for op in self._graph.get_operations():\n for trace_entry in reversed(op.traceback):\n file_path = trace_entry[0]\n line_num = trace_entry[1]\n func_name = trace_entry[2]\n if not source_utils.guess_is_tensorflow_py_library(file_path):\n break\n node_to_file_path[op.name] = file_path\n node_to_line_number[op.name] = line_num\n node_to_func_name[op.name] = func_name\n node_to_op_type[op.name] = op.type\n\n def profile_data_generator(device_step_stats):\n for node_stats in device_step_stats.node_stats:\n if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':\n continue\n yield profiling.ProfileDatum(device_step_stats.device, node_stats, node_to_file_path.get(node_stats.node_name, ''), node_to_line_number.get(node_stats.node_name, 0), node_to_func_name.get(node_stats.node_name, ''), node_to_op_type.get(node_stats.node_name, ''))\n return profile_data_generator", + "docstring": "Get function that generates objects. 
Returns: A function that generates objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py", + "ast_data": "FunctionDef name:_get_profile_data_generator arg:self arguments arg Assign Assign Assign Assign For Call For Call Assign Assign Assign If Call Assign Assign Assign Assign FunctionDef name:profile_data_generator arg:device_step_stats arguments arg For If BoolOp Compare Compare Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ExtractInputShapes", + "source_code": "def _ExtractInputShapes(inputs):\n if context.executing_eagerly():\n return array_ops.shape_n(inputs)\n sizes = []\n fully_known = True\n for x in inputs:\n input_shape = array_ops.shape(x)\n if not isinstance(input_shape, tensor.Tensor) or input_shape.op.type != 'Const':\n fully_known = False\n break\n sizes.append(input_shape)\n if fully_known:\n return sizes\n else:\n return array_ops.shape_n(inputs)", + "docstring": "Extract the shapes of a set of input tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py", + "ast_data": "FunctionDef name:_ExtractInputShapes arg:inputs arguments arg If Call Return return:yes Call Assign Assign For Assign Call If BoolOp Call Compare Assign Call If Return return:yes Return return:yes Call" + }, + { + "library": "scrapy", + "name": "_get_serialized_fields", + "source_code": "def _get_serialized_fields(self, item: Any, default_value: Any=None, include_empty: bool | None=None) -> Iterable[tuple[str, Any]]:\n item = ItemAdapter(item)\n if include_empty is None:\n include_empty = self.export_empty_fields\n if self.fields_to_export is None:\n field_iter = item.field_names() if include_empty else item.keys()\n elif isinstance(self.fields_to_export, Mapping):\n if include_empty:\n field_iter = self.fields_to_export.items()\n else:\n field_iter = ((x, y) for x, y in self.fields_to_export.items() if x in item)\n elif include_empty:\n field_iter = self.fields_to_export\n else:\n field_iter = (x for x in self.fields_to_export if x in item)\n for field_name in field_iter:\n if isinstance(field_name, str):\n item_field, output_field = (field_name, field_name)\n else:\n item_field, output_field = field_name\n if item_field in item:\n field_meta = item.get_field_meta(item_field)\n value = self.serialize_field(field_meta, output_field, item[item_field])\n else:\n value = default_value\n yield (output_field, value)", + "docstring": "Return the fields to export as an iterable of tuples (name, serialized_value)", + "type": "method", + "file_path": "scrapy\\scrapy\\exporters.py", + "ast_data": "FunctionDef name:_get_serialized_fields arg:self arg:item arg:default_value arg:include_empty arguments arg arg arg arg Assign Call If Compare Assign If Compare Assign Call Call If Call If Assign Call Assign Call Compare If Assign Assign Compare For If Call Assign Assign If Compare Assign Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "_init_writer", + "source_code": "def _init_writer(self, model):\n if context.executing_eagerly():\n self.writer = summary_ops_v2.create_file_writer_v2(self.log_dir)\n if not model.run_eagerly and self.write_graph:\n with self.writer.as_default():\n summary_ops_v2.graph(K.get_graph())\n elif self.write_graph:\n self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())\n else:\n self.writer = tf_summary.FileWriter(self.log_dir)", + "docstring": "Sets file writer.", + "type": "method", + "file_path": 
"tensorflow\\tensorflow\\python\\keras\\callbacks_v1.py", + "ast_data": "FunctionDef name:_init_writer arg:self arg:model arguments arg arg If Call Assign Call If BoolOp With Call Call Call If Assign Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "shutdown", + "source_code": "def shutdown(self, wait: bool=True) -> None:\n if self.alive():\n TuningProcess.send(None, self.write_pipe)\n if wait:\n self.wait()", + "docstring": "Signal the child process to shut down gracefully.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\autotune_process.py", + "ast_data": "FunctionDef name:shutdown arg:self arg:wait arguments arg arg If Call Call If Call" + }, + { + "library": "tensorflow", + "name": "conv_input_length", + "source_code": "def conv_input_length(output_length, filter_size, padding, stride):\n if output_length is None:\n return None\n assert padding in {'same', 'valid', 'full'}\n if padding == 'same':\n pad = filter_size // 2\n elif padding == 'valid':\n pad = 0\n elif padding == 'full':\n pad = filter_size - 1\n return (output_length - 1) * stride - 2 * pad + filter_size", + "docstring": "Determines input length of a convolution given output length. Args: output_length: integer. filter_size: integer. padding: one of \"same\", \"valid\", \"full\". stride: integer. Returns: The input length (integer).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\conv_utils.py", + "ast_data": "FunctionDef name:conv_input_length arg:output_length arg:filter_size arg:padding arg:stride arguments arg arg arg arg If Compare Return return:no Compare If Compare Assign If Compare Assign If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "run", + "source_code": "def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n if self.should_stop():\n raise RuntimeError('Run called even after should_stop requested.')\n actual_fetches = {'caller': fetches}\n run_context = session_run_hook.SessionRunContext(original_args=session_run_hook.SessionRunArgs(fetches, feed_dict), session=self._sess)\n options = options or config_pb2.RunOptions()\n feed_dict = self._call_hook_before_run(run_context, actual_fetches, feed_dict, options)\n run_metadata = run_metadata or config_pb2.RunMetadata()\n outputs = _WrappedSession.run(self, fetches=actual_fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata)\n for hook in self._hooks:\n hook.after_run(run_context, session_run_hook.SessionRunValues(results=outputs[hook] if hook in outputs else None, options=options, run_metadata=run_metadata))\n self._should_stop = self._should_stop or run_context.stop_requested\n return outputs['caller']", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:run arg:self arg:fetches arg:feed_dict arg:options arg:run_metadata arguments arg arg arg arg arg If Call Raise Call Assign Assign Call Call Assign BoolOp Call Assign Call Assign BoolOp Call Assign Call For Call Call Compare Assign BoolOp Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ImagGrad", + "source_code": "@ops.RegisterGradient('Imag')\ndef _ImagGrad(_, grad):\n zero = constant_op.constant(0, dtype=grad.dtype)\n return math_ops.complex(zero, grad)", + "docstring": "Returns 'grad' as the imaginary part and set the real part 0.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": 
"FunctionDef name:_ImagGrad arg:_ arg:grad arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_awaitable_nowait", + "source_code": "def _awaitable_nowait(o):\n return torch._C._awaitable_nowait(o)", + "docstring": "Create completed Await with specified result.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\_await.py", + "ast_data": "FunctionDef name:_awaitable_nowait arg:o arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_minor_formatter", + "source_code": "def get_minor_formatter(self):\n return self.minor.formatter", + "docstring": "Get the formatter of the minor ticker.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_minor_formatter arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "benchmark", + "source_code": "def benchmark(self, choices: list[TritonTemplateCaller]) -> dict[TritonTemplateCaller, float]:\n results = dict(zip(choices, self.executor.map(self.target, choices)))\n return results", + "docstring": "Benchmark each choice in a separate process.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\autotune_process.py", + "ast_data": "FunctionDef name:benchmark arg:self arg:choices arguments arg arg Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "contains", + "source_code": "def contains(self, mouseevent, radius=None):\n if self._different_canvas(mouseevent):\n return (False, {})\n radius = self._process_radius(radius)\n codes = self.get_path().codes\n if codes is not None:\n vertices = self.get_path().vertices\n idxs, = np.where(codes == Path.MOVETO)\n idxs = idxs[1:]\n subpaths = map(Path, np.split(vertices, idxs), np.split(codes, idxs))\n else:\n subpaths = [self.get_path()]\n inside = any((subpath.contains_point((mouseevent.x, mouseevent.y), self.get_transform(), radius) for subpath in subpaths))\n return (inside, {})", + "docstring": "Test whether the mouse event occurred in the patch. Parameters ---------- mouseevent : Where the user clicked. radius : float, optional Additional margin on the patch in target coordinates of . See for further details. If , the default value depends on the state of the object: - If is a number, the default is that value. This is so that picking works as expected. - Otherwise if the edge color has a non-zero alpha, the default is half of the linewidth. This is so that all the colored pixels are \"in\" the patch. - Finally, if the edge has 0 alpha, the default is 0. This is so that patches without a stroked edge do not have points outside of the filled region report as \"in\" due to an invisible edge. 
Returns ------- (bool, empty dict)", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arg:radius arguments arg arg arg If Call Return return:yes Assign Call Assign Call If Compare Assign Call Assign Call Compare Assign Assign Call Call Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, values, indices, dense_shape=None):\n self._values = values\n self._indices = indices\n self._dense_shape = dense_shape", + "docstring": "Creates an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:values arg:indices arg:dense_shape arguments arg arg arg arg Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "_print_verbose_msg_iter_end", + "source_code": "def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n if n_iter % self.verbose_interval == 0:\n if self.verbose == 1:\n print(' Iteration %d' % n_iter)\n elif self.verbose >= 2:\n cur_time = time()\n print(' Iteration %d\\t time lapse %.5fs\\t ll change %.5f' % (n_iter, cur_time - self._iter_prev_time, diff_ll))\n self._iter_prev_time = cur_time", + "docstring": "Print verbose message on initialization.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", + "ast_data": "FunctionDef name:_print_verbose_msg_iter_end arg:self arg:n_iter arg:diff_ll arguments arg arg arg If Compare If Compare Call If Compare Assign Call Call Assign" + }, + { + "library": "matplotlib", + "name": "set_xdata", + "source_code": "def set_xdata(self, x):\n if not np.iterable(x):\n raise RuntimeError('x must be a sequence')\n self._xorig = copy.copy(x)\n self._invalidx = True\n self.stale = True", + "docstring": "Set the data array for x. Parameters ---------- x : 1D array See Also -------- set_data set_ydata", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:set_xdata arg:self arg:x arguments arg arg If Call Raise Call Assign Call Assign Assign" + }, + { + "library": "pandas", + "name": "_index_as_unique", + "source_code": "@property\ndef _index_as_unique(self) -> bool:\n return self.is_unique", + "docstring": "Whether we should treat this as unique for the sake of get_indexer vs get_indexer_non_unique. For IntervalIndex compat.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_index_as_unique arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n check_is_fitted(self, 'n_features_in_')\n input_features = _check_feature_names_in(self, input_features)\n if hasattr(self, '_encoder'):\n return self._encoder.get_feature_names_out(input_features)\n return input_features", + "docstring": "Get output feature names. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. 
Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", "type": "method", "file_path": "scikit-learn\\sklearn\\preprocessing\\_discretization.py", "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call If Call Return return:yes Call Return return:yes" }, { "library": "scipy", "name": "process_bounds", "source_code": "def process_bounds(bounds, lenx0):\n if bounds is None:\n lb = np.array([-np.inf] * lenx0, dtype=np.float64)\n ub = np.array([np.inf] * lenx0, dtype=np.float64)\n return (lb, ub)\n if isinstance(bounds, Bounds):\n lb = np.array(bounds.lb, dtype=np.float64)\n ub = np.array(bounds.ub, dtype=np.float64)\n lb = np.concatenate((lb, -np.inf * np.ones(lenx0 - len(lb))))\n ub = np.concatenate((ub, np.inf * np.ones(lenx0 - len(ub))))\n return (lb, ub)\n lb = np.array([bound[0] if bound[0] is not None else -np.inf for bound in bounds], dtype=np.float64)\n ub = np.array([bound[1] if bound[1] is not None else np.inf for bound in bounds], dtype=np.float64)\n lb = np.concatenate((lb, -np.inf * np.ones(lenx0 - len(lb))))\n ub = np.concatenate((ub, np.inf * np.ones(lenx0 - len(ub))))\n return (lb, ub)", + "docstring": "can either be an object with the properties lb and ub, or a list of tuples indicating a lower bound and an upper bound for each variable. If the list contains fewer entries than the length of x0, the remaining entries will be generated as -/+ infinity. Some examples of valid lists of tuples, assuming len(x0) == 3: [(0, 1), (2, 3), (4, 5)] -> returns [0, 2, 4], [1, 3, 5] [(0, 1), (None, 3)] -> returns [0, -inf, -inf], [1, 3, inf] [(0, 1), (-np.inf, 3)] -> returns [0, -inf, -inf], [1, 3, inf]", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\_bounds.py", + "ast_data": "FunctionDef name:process_bounds arg:bounds arg:lenx0 arguments arg arg If Compare Assign Call Assign Call Return return:yes If Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Assign Call Compare Assign Call Compare Assign Call Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "size", + "source_code": "def size(self, name):\n raise NotImplementedError('subclasses of Storage must provide a size() method')", + "docstring": "Return the total size, in bytes, of the file specified by name.", + "type": "method", + "file_path": "django\\django\\core\\files\\storage\\base.py", + "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "_ConsolidatedOptimState", + "source_code": "@dataclass\nclass _ConsolidatedOptimState:\n tensor_state: dict[str, torch.Tensor] = field(default_factory=dict)\n zero_dim_tensor_state: dict[str, torch.Tensor] = field(default_factory=dict)\n non_tensor_state: dict[str, Any] = field(default_factory=dict)", + "docstring": "This holds the consolidated optimizer state on the target rank. Positive- dimension tensor state is communicated across ranks, while zero-dimension tensor state and non-tensor state is taken directly from the target rank. PyTorch version 1.12 moved to using zero-dimension tensors for scalar values, but user implemented optimizers may still use float (i.e. a non-tensor). Thus, we support both and handle them identically. Attributes: tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension tensor state name to the unsharded flat tensor representing the state.
zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero- dimension tensor state name to its value. non_tensor_state (Dict[str, Any]): Mapping from non-tensor state name to its value.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py", + "ast_data": "ClassDef name:_ConsolidatedOptimState Call Call Call" + }, + { + "library": "pytorch", + "name": "is_bf16_supported", + "source_code": "def is_bf16_supported(including_emulation: bool=True):\n if torch.version.hip:\n return True\n if not is_available():\n return False\n device = torch.cuda.current_device()\n cuda_version = torch.version.cuda\n if cuda_version is not None and torch.cuda.get_device_properties(device).major >= 8:\n return True\n if not including_emulation:\n return False\n return _check_bf16_tensor_supported(device)", + "docstring": "Return a bool indicating if the current CUDA/ROCm device supports dtype bfloat16.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:is_bf16_supported arg:including_emulation arguments arg If Return return:yes If Call Return return:yes Assign Call Assign If BoolOp Compare Compare Call Return return:yes If Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "__get__", + "source_code": "def __get__(self, instance, cls=None):\n if instance is None:\n return self\n if isinstance(instance._route, str):\n instance.__dict__['regex'] = re.compile(instance._regex)\n return instance.__dict__['regex']\n language_code = get_language()\n if language_code not in instance._regex_dict:\n instance._regex_dict[language_code] = re.compile(_route_to_regex(str(instance._route), instance._is_endpoint)[0])\n return instance._regex_dict[language_code]", + "docstring": "Return a compiled regular expression based on the active language.", + "type": "method", + "file_path": "django\\django\\urls\\resolvers.py", + "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes If Call Assign Call Return return:yes Assign Call If Compare Assign Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "lebedev_rule", + "source_code": "def lebedev_rule(n):\n degree = [6, 14, 26, 38, 50, 74, 86, 110, 146, 170, 194, 230, 266, 302, 350, 434, 590, 770, 974, 1202, 1454, 1730, 2030, 2354, 2702, 3074, 3470, 3890, 4334, 4802, 5294, 5810]\n order = [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 35, 41, 47, 53, 59, 65, 71, 77, 83, 89, 95, 101, 107, 113, 119, 125, 131]\n order_degree_map = dict(zip(order, degree))\n if n not in order_degree_map:\n message = f'Order n={n!r} not available. Available orders are {order}.'\n raise NotImplementedError(message)\n degree = order_degree_map[n]\n res = get_lebedev_sphere(degree)\n x = np.stack((res.x, res.y, res.z))\n w = res.w\n return (x, w)", + "docstring": "Lebedev quadrature. Compute the sample points and weights for Lebedev quadrature [1]_ for integration of a function over the surface of a unit sphere. Parameters ---------- n : int Quadrature order. See Notes for supported values. Returns ------- x : ndarray of shape `n10.1016/j.cam.2022.114142f(x, y, z) = \\exp(x)1`. Show the convergence to the expected result as the order increases: >>> import matplotlib.pyplot as plt >>> import numpy as np >>> from scipy.integrate import lebedev_rule >>> >>> def f(x): ... return np.exp(x[0]) >>> >>> res = [] >>> orders = np.arange(3, 20, 2) >>> for n in orders: ... x, w = lebedev_rule(n) ... 
res.append(w @ f(x)) >>> >>> ref = np.full_like(res, 14.7680137457653) >>> err = abs(res - ref)/abs(ref) >>> plt.semilogy(orders, err) >>> plt.xlabel('order $n$') >>> plt.ylabel('relative error') >>> plt.title(r'Convergence for $f(x, y, z) = \\exp(x)$') >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\integrate\\_lebedev.py", + "ast_data": "FunctionDef name:lebedev_rule arg:n arguments arg Assign Assign Assign Call Call If Compare Assign Raise Call Assign Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_mark_func_graph_as_unsaveable", + "source_code": "def _mark_func_graph_as_unsaveable(graph, learning_phase):\n if graph.building_function and is_placeholder(learning_phase):\n graph.mark_as_unsaveable('The keras learning phase placeholder was used inside a function. Exporting placeholders is not supported when saving out a SavedModel. Please call `tf.keras.backend.set_learning_phase(0)` in the function to set the learning phase to a constant value.')", + "docstring": "Mark func graph as unsaveable due to use of symbolic keras learning phase. Functions that capture the symbolic learning phase cannot be exported to SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised if it is exported. Args: graph: Graph or FuncGraph object. learning_phase: Learning phase placeholder or int defined in the graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_mark_func_graph_as_unsaveable arg:graph arg:learning_phase arguments arg arg If BoolOp Call Call" + }, + { + "library": "scikit-learn", + "name": "_parallel_decision_function", + "source_code": "def _parallel_decision_function(estimators, estimators_features, X, params):\n return sum((estimator.decision_function(X[:, features], **params) for estimator, features in zip(estimators, estimators_features)))", + "docstring": "Private function used to compute decisions within a job.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py", + "ast_data": "FunctionDef name:_parallel_decision_function arg:estimators arg:estimators_features arg:X arg:params arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "_getsubdtype", + "source_code": "@classmethod\ndef _getsubdtype(cls, val):\n return np.array(val).dtype.type", + "docstring": "Returns the type of the dtype of the input variable.", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_iotools.py", + "ast_data": "FunctionDef name:_getsubdtype arg:cls arg:val arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "validate", + "source_code": "def validate(self):\n if self._validated:\n return\n for constraint in self.constraints:\n _validate_pass_schedule_constraint(constraint, self.passes)\n self._validated = True", + "docstring": "Validates that current pass schedule defined by is valid according to all constraints in", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\pass_manager.py", + "ast_data": "FunctionDef name:validate arg:self arguments arg If Return return:no For Call Assign" + }, + { + "library": "pytorch", + "name": "_optimize", + "source_code": "def _optimize(rebuild_ctx: Callable[[], Union[OptimizeContext, _NullDecorator]], backend='inductor', *, nopython=False, guard_export_fn=None, guard_fail_fn=None, guard_filter_fn=None, disable=False, dynamic=None) -> Union[OptimizeContext, _NullDecorator]:\n 
check_if_dynamo_supported()\n check_for_incompatible_configs()\n hooks = Hooks(guard_export_fn=guard_export_fn, guard_fail_fn=guard_fail_fn, guard_filter_fn=guard_filter_fn)\n torch._C._log_api_usage_once('torch._dynamo.optimize')\n if disable or os.environ.get('TORCHDYNAMO_DISABLE', '') == '1' or (not justknobs_check('pytorch/compiler:enable_dynamo')):\n return _NullDecorator()\n if nopython:\n return optimize_assert(backend, dynamic=dynamic, hooks=hooks, rebuild_ctx=rebuild_ctx)\n backend = get_compiler_fn(backend)\n backend_ctx_ctor = getattr(backend, 'backend_ctx_ctor', null_context)\n return _optimize_catch_errors(convert_frame.convert_frame(backend, hooks=hooks), hooks, backend_ctx_ctor, dynamic=dynamic, compiler_config=backend.get_compiler_config() if hasattr(backend, 'get_compiler_config') else None, rebuild_ctx=rebuild_ctx)", + "docstring": "The main entrypoint of TorchDynamo. Do graph capture and call backend() to optimize extracted graphs. Args: backend: One of the two things: - Either, a function/callable taking a torch.fx.GraphModule and example_inputs and returning a python callable that runs the graph faster. One can also provide additional context for the backend, like torch.jit.fuser(\"fuser2\"), by setting the backend_ctx_ctor attribute. See AOTAutogradMemoryEfficientFusionWithContext for the usage. - Or, a string backend name in nopython: If True, graph breaks will be errors and there will be a single whole-program graph. disable: If True, turn this decorator into a no-op dynamic: If True, upfront compile as dynamic a kernel as possible. If False, disable all dynamic shapes support (always specialize). If None, automatically detect when sizes vary and generate dynamic kernels upon recompile. Example Usage:: @torch._dynamo.optimize() def toy_example(a, b): ...", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\eval_frame.py", + "ast_data": "FunctionDef name:_optimize arg:rebuild_ctx arg:backend arguments arg arg arg arg arg arg arg arg Call Call Assign Call Call If BoolOp Compare Call Call Return return:yes Call If Return return:yes Call Assign Call Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "scipy", + "name": "_CythonSpecialMeta", + "source_code": "class _CythonSpecialMeta(type):\n\n def __new__(cls, cls_name, bases, dct):\n params = [(10, 100, 1000), ('python', 'numpy', 'cython')]\n param_names = ['N', 'api']\n\n def get_time_func(name, args):\n\n @with_attributes(params=[(name,), (args,)] + params, param_names=['name', 'argument'] + param_names)\n def func(self, name, args, N, api):\n if api == 'python':\n self.py_func(N, *args)\n elif api == 'numpy':\n self.np_func(*self.obj)\n else:\n self.cy_func(N, *args)\n func.__name__ = 'time_' + name\n return func\n for name in FUNC_ARGS.keys():\n func = get_time_func(name, FUNC_ARGS[name])\n dct[func.__name__] = func\n return type.__new__(cls, cls_name, bases, dct)", + "docstring": "Add time_* benchmarks corresponding to cython_special._bench_*_cy", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\cython_special.py", + "ast_data": "ClassDef name:_CythonSpecialMeta FunctionDef name:__new__ arg:cls arg:cls_name arg:bases arg:dct arguments arg arg arg arg Assign Assign FunctionDef name:get_time_func arg:name arg:args arguments arg arg FunctionDef name:func arg:self arg:name arg:args arg:N arg:api arguments arg arg arg arg arg If Compare Call If Compare Call Call Call Assign Return return:yes For Call Assign Call Assign Return return:yes Call" + }, + { + "library": 
"tensorflow", + "name": "trainable_variables", + "source_code": "@property\ndef trainable_variables(self):\n if self._variables_created:\n return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, self.variable_scope_name)\n else:\n return []", + "docstring": "Returns the list of trainable variables created by the Template.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", + "ast_data": "FunctionDef name:trainable_variables arg:self arguments arg If Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "wrap_py_func", + "source_code": "def wrap_py_func(f, args, kwargs=None):\n tensor_args = []\n tensor_args_idx = {}\n n_args = len(args)\n arg_is_tensor = tuple(map(tensor_util.is_tf_type, args))\n for i in range(n_args):\n if arg_is_tensor[i]:\n tensor_args_idx[i] = len(tensor_args)\n tensor_args.append(args[i])\n if kwargs:\n kwarg_keys = tuple(kwargs.keys())\n kwarg_is_tensor = {k: tensor_util.is_tf_type(kwargs[k]) for k in kwarg_keys}\n for k in kwarg_keys:\n if kwarg_is_tensor[k]:\n tensor_args_idx[k] = len(tensor_args)\n tensor_args.append(kwargs[k])\n else:\n kwarg_keys = ()\n\n def f_wrapper(*tensor_args):\n f_args = tuple((tensor_args[tensor_args_idx[i]] if arg_is_tensor[i] else a for i, a in enumerate(args)))\n f_kwargs = {k: tensor_args[tensor_args_idx[k]] if kwarg_is_tensor[k] else kwargs[k] for i, k in enumerate(kwarg_keys)}\n f(*f_args, **f_kwargs)\n return 1\n return script_ops.eager_py_func(f_wrapper, tensor_args, dtypes.int32)", + "docstring": "Helper that wraps a callable to py_func. The helper passes tensor arguments through the py_func interface. Non-tensor arguments are allowed, and will be passed to f directly. Note that non-tensor arguments are captured by f will not update every time the wrapper is called (this is consistent with its argument list, which only includes the tensor arguments). In general, it's safest not to reuse this wrapper. Args: f: Callable args: Positional arguments for f, as list or tuple. kwargs: Keyword arguments for f, as dict with string keys. May be None. Returns: The return values of f converted to tensor. Raises: ValueError: if any of the arguments are incorrect.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\autograph_ops.py", + "ast_data": "FunctionDef name:wrap_py_func arg:f arg:args arg:kwargs arguments arg arg arg Assign Assign Assign Call Assign Call Call For Call If Assign Call Call If Assign Call Call Assign Call For If Assign Call Call Assign FunctionDef name:f_wrapper arguments arg Assign Call Call Assign Call Call Return return:yes Return return:yes Call" + }, + { + "library": "pandas", + "name": "idxmax", + "source_code": "def idxmax(self, skipna: bool=True) -> Series:\n return self._idxmax_idxmin('idxmax', skipna=skipna)", + "docstring": "Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA values. Returns ------- Series Indexes of maxima in each group. Raises ------ ValueError If the Series is empty or skipna=False and any value is NA. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... 
[\"2023-01-01\", \"2023-01-15\", \"2023-02-01\", \"2023-02-15\"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.groupby([\"a\", \"a\", \"b\", \"b\"]).idxmax() a 2023-01-15 b 2023-02-15 dtype: datetime64[s]", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\generic.py", + "ast_data": "FunctionDef name:idxmax arg:self arg:skipna arguments arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "PinholeModel", + "source_code": "class PinholeModel(CameraModelBase):\n\n def __init__(self, image_size: ImageSize, params: Tensor) -> None:\n if params.shape[-1] != 4 or len(params.shape) > 2:\n raise ValueError('params must be of shape (B, 4) for PINHOLE Camera')\n super().__init__(AffineTransform(), Z1Projection(), image_size, params)\n\n def matrix(self) -> Tensor:\n z = zeros_like(self.fx)\n row1 = stack((self.fx, z, self.cx), -1)\n row2 = stack((z, self.fy, self.cy), -1)\n row3 = stack((z, z, z), -1)\n K = stack((row1, row2, row3), -2)\n K[..., -1, -1] = 1.0\n return K\n\n def scale(self, scale_factor: Tensor) -> PinholeModel:\n fx = self.fx * scale_factor\n fy = self.fy * scale_factor\n cx = self.cx * scale_factor\n cy = self.cy * scale_factor\n params = stack((fx, fy, cx, cy), -1)\n image_size = ImageSize(self.image_size.height * scale_factor, self.image_size.width * scale_factor)\n return PinholeModel(image_size, params)", + "docstring": "Class to represent Pinhole Camera Model. The pinhole camera model describes the mathematical relationship between the coordinates of a point in three-dimensional space and its projection onto the image plane of an ideal pinhole camera, where the camera aperture is described as a point and no lenses are used to focus light. See more: Example: >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.PINHOLE, torch.Tensor([328., 328., 320., 240.])) >>> cam CameraModel(ImageSize(height=480, width=640), PinholeModel, tensor([328., 328., 320., 240.]))", + "type": "class", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "ClassDef name:PinholeModel FunctionDef name:__init__ arg:self arg:image_size arg:params arguments arg arg arg If BoolOp Compare Compare Call Raise Call Call Call Call Call FunctionDef name:matrix arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes FunctionDef name:scale arg:self arg:scale_factor arguments arg arg Assign Assign Assign Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "save_optimizer_weights_to_hdf5_group", + "source_code": "def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):\n symbolic_weights = getattr(optimizer, 'weights')\n if symbolic_weights:\n weights_group = hdf5_group.create_group('optimizer_weights')\n weight_names = [str(w.name).encode('utf8') for w in symbolic_weights]\n save_attributes_to_hdf5_group(weights_group, 'weight_names', weight_names)\n weight_values = backend.batch_get_value(symbolic_weights)\n for name, val in zip(weight_names, weight_values):\n param_dset = weights_group.create_dataset(name, val.shape, dtype=val.dtype)\n if not val.shape:\n param_dset[()] = val\n else:\n param_dset[:] = val", + "docstring": "Saves optimizer weights of a optimizer to a HDF5 group. Args: hdf5_group: HDF5 group. 
optimizer: optimizer instance.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py", + "ast_data": "FunctionDef name:save_optimizer_weights_to_hdf5_group arg:hdf5_group arg:optimizer arguments arg arg Assign Call If Assign Call Assign Call Call Call Assign Call For Call Assign Call If Assign Assign" + }, + { + "library": "tensorflow", + "name": "LSTMStateTuple", + "source_code": "@tf_export(v1=['nn.rnn_cell.LSTMStateTuple'])\nclass LSTMStateTuple(_LSTMStateTuple):\n __slots__ = ()\n\n @property\n def dtype(self):\n c, h = self\n if c.dtype != h.dtype:\n raise TypeError('Inconsistent internal state: %s vs %s' % (str(c.dtype), str(h.dtype)))\n return c.dtype", + "docstring": "Tuple used by LSTM Cells for , , and output state. Stores two elements: , in that order. Where is the hidden state and is the output. Only used when .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", + "ast_data": "ClassDef name:LSTMStateTuple Assign FunctionDef name:dtype arg:self arguments arg Assign If Compare Raise Call Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "line_map_to_segments", + "source_code": "def line_map_to_segments(junctions: Tensor, line_map: Tensor) -> Tensor:\n junc_loc1, junc_loc2 = where(torch.triu(line_map))\n segments = stack([junctions[junc_loc1], junctions[junc_loc2]], 1)\n return segments", + "docstring": "Convert a junction connectivity map to a Nx2x2 tensor of segments.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\sold2\\sold2_detector.py", + "ast_data": "FunctionDef name:line_map_to_segments arg:junctions arg:line_map arguments arg arg Assign Call Call Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "footer", + "source_code": "def footer(self):\n return ''", + "docstring": "Render HTML layout footer.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut08_generators_and_yield.py", + "ast_data": "FunctionDef name:footer arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_cplxpair", + "source_code": "def _cplxpair(z, tol=None):\n z = atleast_1d(z)\n if z.size == 0 or np.isrealobj(z):\n return np.sort(z)\n if z.ndim != 1:\n raise ValueError('z must be 1-D')\n zc, zr = _cplxreal(z, tol)\n zc = np.dstack((zc.conj(), zc)).flatten()\n z = np.append(zc, zr)\n return z", + "docstring": "Sort into pairs of complex conjugates. Complex conjugates in are sorted by increasing real part. In each pair, the number with negative imaginary part appears first. If pairs have identical real parts, they are sorted by increasing imaginary magnitude. Two complex numbers are considered a conjugate pair if their real and imaginary parts differ in magnitude by less than `tolzz` for which a conjugate cannot be found. 
See Also -------- _cplxreal Examples -------- >>> from scipy.signal._filter_design import _cplxpair >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] >>> z = _cplxpair(a) >>> print(z) [ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j 3.+0.j 4.+0.j]", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_filter_design.py", + "ast_data": "FunctionDef name:_cplxpair arg:z arg:tol arguments arg arg Assign Call If BoolOp Compare Call Return return:yes Call If Compare Raise Call Assign Call Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_threshold", + "source_code": "def _threshold(input: Tensor, threshold: float, value: float, inplace: bool=False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(_threshold, (input,), input, threshold, value, inplace=inplace)\n if inplace:\n result = _VF.threshold_(input, threshold, value)\n else:\n result = _VF.threshold(input, threshold, value)\n return result", + "docstring": "Apply a threshold to each element of the input Tensor. See :class: for more details.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:_threshold arg:input arg:threshold arg:value arg:inplace arguments arg arg arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_dense_var_to_tensor", + "source_code": "def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n if as_ref:\n raise ValueError('Cannot convert AutoCastVariable to a tensor if as_ref=True is passed to convert_to_tensor')\n if not self._should_cast():\n return tensor_conversion.convert_to_tensor_v2_with_dispatch(self._variable, dtype=dtype, name=name)\n if dtype is not None and (not dtype.is_compatible_with(self._cast_dtype)):\n raise ValueError('Incompatible type conversion requested to type {!r} for AutoCastVariable which is casted to type {!r}'.format(dtype.name, self._cast_dtype.name))\n val = tensor_conversion.convert_to_tensor_v2_with_dispatch(self._variable, dtype=self._variable.dtype, name=name)\n return math_ops.cast(val, self._cast_dtype)", + "docstring": "Converts this variable to a tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py", + "ast_data": "FunctionDef name:_dense_var_to_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Raise Call If Call Return return:yes Call If BoolOp Compare Call Raise Call Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "stride", + "source_code": "def stride(self, name, index=None):\n if name is None:\n val = self.output_node.get_stride()\n else:\n assert isinstance(name, str)\n val = self.named_input_nodes[name].get_stride()\n if isinstance(index, int):\n return texpr(self.rename_indexing(val[index]))\n return ', '.join([texpr(self.rename_indexing(i)) for i in val])", + "docstring": "Hook called from template code to get the stride of an arg. 
Will add needed args to pass it in if it is dynamic.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", + "ast_data": "FunctionDef name:stride arg:self arg:name arg:index arguments arg arg arg If Compare Assign Call Call Assign Call If Call Return return:yes Call Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "add_to_set_toplevel", + "source_code": "@staticmethod\ndef add_to_set_toplevel(key: str, value: Any, log_level: CompileEventLogLevel=CompileEventLogLevel.COMPILATION_METRIC):\n chromium_log = get_chromium_event_logger()\n top_event = chromium_log.get_outermost_event()\n if top_event is None:\n raise RuntimeError('No toplevel event active. Please only call this function within a metrics context/dynamo_timed.')\n CompileEventLogger.add_to_set(top_event, log_level, key, value)", + "docstring": "Same as add to set, just does it automatically to the toplevel event instead of having to explicitly name it. Defaults to COMPILATION_METRIC log level.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:add_to_set_toplevel arg:key arg:value arg:log_level arguments arg arg arg Assign Call Assign Call If Compare Raise Call Call" + }, + { + "library": "scipy", + "name": "_resolve_ufunc", + "source_code": "def _resolve_ufunc(self, **kwargs):\n if isinstance(self._ufunc_or_ufuncs, np.ufunc):\n return self._ufunc_or_ufuncs\n ufunc_key = self._key(**kwargs)\n return self._ufunc_or_ufuncs[ufunc_key]", + "docstring": "Resolve to a ufunc based on keyword arguments.", + "type": "method", + "file_path": "scipy\\scipy\\special\\_multiufuncs.py", + "ast_data": "FunctionDef name:_resolve_ufunc arg:self arguments arg arg If Call Return return:yes Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "angle_to_rotation_matrix", + "source_code": "def angle_to_rotation_matrix(angle: Tensor) -> Tensor:\n ang_rad = deg2rad(angle)\n cos_a: Tensor = cos(ang_rad)\n sin_a: Tensor = sin(ang_rad)\n return stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", + "docstring": "Create a rotation matrix out of angles in degrees. Args: angle: tensor of angles in degrees, any shape :math:. Returns: tensor of rotation matrices with shape :math:. Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_to_rotation_matrix(input) # Nx3x2x2", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:angle_to_rotation_matrix arg:angle arguments arg Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "convert_cmake_value_to_python_value", + "source_code": "def convert_cmake_value_to_python_value(cmake_value: str, cmake_type: str) -> CMakeValue:\n cmake_type = cmake_type.upper()\n up_val = cmake_value.upper()\n if cmake_type == 'BOOL':\n return not (up_val in ('FALSE', 'OFF', 'N', 'NO', '0', '', 'NOTFOUND') or up_val.endswith('-NOTFOUND'))\n elif cmake_type == 'FILEPATH':\n if up_val.endswith('-NOTFOUND'):\n return None\n else:\n return cmake_value\n else:\n return cmake_value", + "docstring": "Convert a CMake value in a string form to a Python value. Args: cmake_value (string): The CMake value in a string form (e.g., \"ON\", \"OFF\", \"1\"). cmake_type (string): The CMake type of :attr:. 
Returns: A Python value corresponding to :attr: with type :attr:.", + "type": "function", + "file_path": "pytorch\\tools\\setup_helpers\\cmake_utils.py", + "ast_data": "FunctionDef name:convert_cmake_value_to_python_value arg:cmake_value arg:cmake_type arguments arg arg Assign Call Assign Call If Compare Return return:yes BoolOp Compare Call If Compare If Call Return return:no Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "row_norms", + "source_code": "def row_norms(X, squared=False):\n if sparse.issparse(X):\n X = X.tocsr()\n norms = csr_row_norms(X)\n if not squared:\n norms = np.sqrt(norms)\n else:\n xp, _ = get_namespace(X)\n if _is_numpy_namespace(xp):\n X = np.asarray(X)\n norms = np.einsum('ij,ij->i', X, X)\n norms = xp.asarray(norms)\n else:\n norms = xp.sum(xp.multiply(X, X), axis=1)\n if not squared:\n norms = xp.sqrt(norms)\n return norms", + "docstring": "Row-wise (squared) Euclidean norm of X. Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse matrices and does not create an X.shape-sized temporary. Performs no input validation. Parameters ---------- X : array-like The input array. squared : bool, default=False If True, return squared norms. Returns ------- array-like The row-wise (squared) Euclidean norm of X.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\extmath.py", + "ast_data": "FunctionDef name:row_norms arg:X arg:squared arguments arg arg If Call Assign Call Assign Call If Assign Call Assign Call If Call Assign Call Assign Call Assign Call Assign Call Call If Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_string", + "source_code": "@classmethod\ndef from_string(cls, layout_str: str) -> 'Layout':\n return cls._new_object(layout_str=layout_str)", + "docstring": "Creates an instance from a human-readable string.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:from_string arg:cls arg:layout_str arguments arg arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "query_token", + "source_code": "def query_token(self, token_string, token_type_hint):\n raise NotImplementedError()", + "docstring": "Get the token from database/storage by the given token string. 
Developers should implement this method:: def query_token(self, token_string, token_type_hint): if token_type_hint == 'access_token': return Token.query_by_access_token(token_string) if token_type_hint == 'refresh_token': return Token.query_by_refresh_token(token_string) return Token.query_by_access_token(token_string) or Token.query_by_refresh_token(token_string)", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7009\\revocation.py", + "ast_data": "FunctionDef name:query_token arg:self arg:token_string arg:token_type_hint arguments arg arg arg Raise Call" + }, + { + "library": "pandas", + "name": "itemsize", + "source_code": "@cache_readonly\ndef itemsize(self) -> int:\n return self.numpy_dtype.itemsize", + "docstring": "Return the number of bytes in this dtype", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:itemsize arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "FunctionModifiers", + "source_code": "class FunctionModifiers:\n UNUSED = 'unused (ignored and replaced with raising of an exception)'\n IGNORE = \"ignore (leave as a call to Python, cannot be torch.jit.save'd)\"\n EXPORT = 'export (compile this function even if nothing calls it)'\n DEFAULT = 'default (compile if called from a exported function / forward)'\n COPY_TO_SCRIPT_WRAPPER = 'if this method is not scripted, copy the python method onto the scripted model'\n _DROP = '_drop (function is fully ignored, declaration can be unscriptable)'", + "docstring": "Used to denote the behavior of a function in TorchScript. See export() and ignore() for details.", + "type": "class", + "file_path": "pytorch\\torch\\_jit_internal.py", + "ast_data": "ClassDef name:FunctionModifiers Assign Assign Assign Assign Assign Assign" + }, + { + "library": "matplotlib", + "name": "__getitem__", + "source_code": "def __getitem__(self, name):\n if self.is_available(name):\n return self._registered[name]\n raise RuntimeError(f'Requested MovieWriter ({name}) not available')", + "docstring": "Get an available writer class from its name.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:name arguments arg arg If Call Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "create_saveable_object", + "source_code": "def create_saveable_object(name, key, factory, call_with_mapped_captures):\n if call_with_mapped_captures is None:\n return factory(name=key)\n if name == trackable_utils.SERIALIZE_TO_TENSORS_NAME:\n return factory(name=key, call_with_mapped_captures=call_with_mapped_captures)\n elif is_factory_for_restored_saveable_object(factory):\n concrete_save_fn = factory.keywords['save_function']\n\n def save_fn(name):\n return call_with_mapped_captures(concrete_save_fn, [name])\n concrete_restore_fn = factory.keywords['restore_function']\n\n def restore_fn(*restored_tensors):\n return call_with_mapped_captures(concrete_restore_fn, restored_tensors)\n return factory(save_function=save_fn, restore_function=restore_fn, name=key)\n else:\n return factory(name=key)", + "docstring": "Creates a SaveableObject while potentially in a different graph. When creating the frozen saver for SavedModel, the save and restore ops are placed in a separate graph. Since RestoredSaveableObject uses tf.functions to save and restore, the function captures must be mapped to the new graph. Args: name: Name of SaveableObject factory. 
key: Checkpoint key of this SaveableObject. factory: Factory method for creating the SaveableObject. call_with_mapped_captures: Helper that calls a tf.function while remapping the captures. Returns: a SaveableObject.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "FunctionDef name:create_saveable_object arg:name arg:key arg:factory arg:call_with_mapped_captures arguments arg arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Call Assign FunctionDef name:save_fn arg:name arguments arg Return return:yes Call Assign FunctionDef name:restore_fn arguments arg Return return:yes Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "__torch_function__", + "source_code": "@classmethod\ndef __torch_function__(cls, func, types, args=(), kwargs=None):\n if kwargs is None:\n kwargs = {}\n if not all((issubclass(cls, t) for t in types)):\n return NotImplemented\n with _C.DisableTorchFunctionSubclass():\n ret = func(*args, **kwargs)\n if func in get_default_nowrap_functions():\n return ret\n else:\n return _convert(ret, cls)", + "docstring": "This __torch_function__ implementation wraps subclasses such that methods called on subclasses return a subclass instance instead of a `__torch_function__` a classmethod.", + "type": "method", + "file_path": "pytorch\\torch\\_tensor.py", + "ast_data": "FunctionDef name:__torch_function__ arg:cls arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg If Compare Assign If Call Call Return return:yes With Call Assign Call If Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "push_current", + "source_code": "def push_current(self):\n self._nav_stack.push(WeakKeyDictionary({ax: (ax._get_view(), (ax.get_position(True).frozen(), ax.get_position().frozen())) for ax in self.canvas.figure.axes}))\n self.set_history_buttons()", + "docstring": "Push the current view limits and position onto the stack.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:push_current arg:self arguments arg Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "__bool__", + "source_code": "def __bool__(self):\n return self._dims is not None", + "docstring": "Returns True if this shape contains non-zero information.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:__bool__ arg:self arguments arg Return return:yes Compare" + }, + { + "library": "pandas", + "name": "_pprint_dict", + "source_code": "def _pprint_dict(seq: Mapping, _nest_lvl: int=0, max_seq_items: int | None=None, **kwds: Any) -> str:\n fmt = '{{{things}}}'\n pairs = []\n pfmt = '{key}: {val}'\n if max_seq_items is False:\n nitems = len(seq)\n else:\n nitems = max_seq_items or get_option('max_seq_items') or len(seq)\n for k, v in list(seq.items())[:nitems]:\n pairs.append(pfmt.format(key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds), val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)))\n if nitems < len(seq):\n return fmt.format(things=', '.join(pairs) + ', ...')\n else:\n return fmt.format(things=', '.join(pairs))", + "docstring": "internal. pprinter for iterables. 
you should probably use pprint_thing() rather than calling this directly.", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\printing.py", + "ast_data": "FunctionDef name:_pprint_dict arg:seq arg:_nest_lvl arg:max_seq_items arguments arg arg arg arg Assign Assign Assign If Compare Assign Call Assign BoolOp Call Call For Call Call Call Call Call Call If Compare Call Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "is_closed", + "source_code": "@abstractmethod\ndef is_closed(self) -> bool:\n pass", + "docstring": "Check whether the rendezvous has been closed. A closed rendezvous means all future attempts to re-rendezvous within same job will fail. `set_closed` have semantics of eventual propagation and should not be used for synchronization. The intention is that if at least one node decides the job is finished, it will close the rendezvous, and other nodes will soon observe this and stop running as well.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", + "ast_data": "FunctionDef name:is_closed arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "get", + "source_code": "def get(identifier):\n if isinstance(identifier, dict):\n return deserialize(identifier)\n elif isinstance(identifier, str):\n return deserialize(str(identifier))\n elif callable(identifier):\n return identifier\n else:\n raise ValueError('Could not interpret metric function identifier: {}'.format(identifier))", + "docstring": "Retrieves a Keras metric as a / class instance. The may be the string name of a metric function or class. >>> metric = tf.keras.metrics.get(\"categorical_crossentropy\") >>> type(metric) >>> metric = tf.keras.metrics.get(\"CategoricalCrossentropy\") >>> type(metric) You can also specify of the metric to this function by passing dict containing and as an identifier. Also note that the must map to a class >>> identifier = {\"class_name\": \"CategoricalCrossentropy\", ... \"config\": {\"from_logits\": True}} >>> metric = tf.keras.metrics.get(identifier) >>> type(metric) Args: identifier: A metric identifier. One of None or string name of a metric function/class or metric configuration dictionary or a metric function or a metric class instance Returns: A Keras metric as a / class instance. Raises: ValueError: If cannot be interpreted.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:get arg:identifier arguments arg If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Raise Call Call" + }, + { + "library": "scipy", + "name": "sum_labels", + "source_code": "def sum_labels(input, labels=None, index=None):\n count, sum = _stats(input, labels, index)\n return sum", + "docstring": "Calculate the sum of the values of the array. Parameters ---------- input : array_like Values of inside the regions defined by are summed together. labels : array_like of ints, optional Assign labels to the values of the array. Has to have the same shape as . index : array_like, optional A single label number or a sequence of label numbers of the objects to be measured. Returns ------- sum : ndarray or scalar An array of the sums of values of inside the regions defined by with the same shape as . If 'index' is None or scalar, a scalar is returned. 
See Also -------- mean, median Examples -------- >>> from scipy import ndimage >>> input = [0,1,2,3] >>> labels = [1,1,2,2] >>> ndimage.sum_labels(input, labels, index=[1,2]) [1.0, 5.0] >>> ndimage.sum_labels(input, labels, index=1) 1 >>> ndimage.sum_labels(input, labels) 6", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_measurements.py", + "ast_data": "FunctionDef name:sum_labels arg:input arg:labels arg:index arguments arg arg arg Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "LinAlgWarning", + "source_code": "class LinAlgWarning(RuntimeWarning):\n pass", + "docstring": "The warning emitted when a linear algebra related operation is close to fail conditions of the algorithm or loss of accuracy is expected.", + "type": "class", + "file_path": "scipy\\scipy\\linalg\\_misc.py", + "ast_data": "ClassDef name:LinAlgWarning" + }, + { + "library": "pandas", + "name": "__dataframe__", + "source_code": "def __dataframe__(self, nan_as_null: bool=False, allow_copy: bool=True) -> DataFrameXchg:\n from pandas.core.interchange.dataframe import PandasDataFrameXchg\n return PandasDataFrameXchg(self, allow_copy=allow_copy)", + "docstring": "Return the dataframe interchange object implementing the interchange protocol. .. note:: For new development, we highly recommend using the Arrow C Data Interface alongside the Arrow PyCapsule Interface instead of the interchange protocol .. warning:: Due to severe implementation issues, we recommend only considering using the interchange protocol in the following cases: - converting to pandas: for pandas >= 2.0.3 - converting from pandas: for pandas >= 3.0.0 Parameters ---------- nan_as_null : bool, default False is DEPRECATED and has no effect. Please avoid using it; it will be removed in a future release. allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. Notes ----- Details on the interchange protocol: Examples -------- >>> df_not_necessarily_pandas = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}) >>> interchange_object = df_not_necessarily_pandas.__dataframe__() >>> interchange_object.column_names() Index(['A', 'B'], dtype='object') >>> df_pandas = pd.api.interchange.from_dataframe( ... interchange_object.select_columns_by_name([\"A\"]) ... ) >>> df_pandas A 0 1 1 2 These methods (``) should work for any dataframe library which implements the interchange protocol.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:__dataframe__ arg:self arg:nan_as_null arg:allow_copy arguments arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "Mishra03", + "source_code": "class Mishra03(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.global_optimum = [[-9.99378322, -9.99918927]]\n self.fglob = -0.19990562\n\n def fun(self, x, *args):\n self.nfev += 1\n return 0.01 * (x[0] + x[1]) + sqrt(abs(cos(sqrt(abs(x[0] ** 2 + x[1] ** 2)))))", + "docstring": "Mishra 3 objective function. This class defines the Mishra 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. 
math:: f_{\\text{Mishra03}}(x) = \\sqrt{\\lvert \\cos{\\sqrt{\\lvert x_1^2 + x_2^2 \\rvert}} \\rvert} + 0.01(x_1 + x_2) with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: I think that Jamil#76 has the wrong global minimum, a smaller one is possible", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py", + "ast_data": "ClassDef name:Mishra03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_copy_fn", + "source_code": "def _copy_fn(fn):\n if not callable(fn):\n raise TypeError('fn is not callable: %s' % fn)\n return types.FunctionType(code=fn.__code__, globals=fn.__globals__, name=fn.__name__, argdefs=fn.__defaults__, closure=fn.__closure__)", + "docstring": "Create a deep copy of fn. Args: fn: a callable Returns: A : a deep copy of fn. Raises: TypeError: if is not a callable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", + "ast_data": "FunctionDef name:_copy_fn arg:fn arguments arg If Call Raise Call Return return:yes Call" + }, + { + "library": "django", + "name": "GroupManager", + "source_code": "class GroupManager(models.Manager):\n use_in_migrations = True\n\n def get_by_natural_key(self, name):\n return self.get(name=name)\n\n async def aget_by_natural_key(self, name):\n return await self.aget(name=name)", + "docstring": "The manager for the auth's Group model.", + "type": "class", + "file_path": "django\\django\\contrib\\auth\\models.py", + "ast_data": "ClassDef name:GroupManager Assign FunctionDef name:get_by_natural_key arg:self arg:name arguments arg arg Return return:yes Call AsyncFunctionDef name:aget_by_natural_key arg:self arg:name arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "DisallowedRedirect", + "source_code": "class DisallowedRedirect(SuspiciousOperation):\n pass", + "docstring": "Redirect to scheme not in allowed list", + "type": "class", + "file_path": "django\\django\\core\\exceptions.py", + "ast_data": "ClassDef name:DisallowedRedirect" + }, + { + "library": "pytorch", + "name": "range_check", + "source_code": "def range_check(i, n):\n if i >= 0:\n return T() if i < n else F()\n else:\n return T() if i >= n else F()", + "docstring": "Checks if an index i is within range of a size n list Args: i: index n: list size Returns: Boolean", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py", + "ast_data": "FunctionDef name:range_check arg:i arg:n arguments arg arg If Compare Return return:yes Compare Call Call Return return:yes Compare Call Call" + }, + { + "library": "pandas", + "name": "_dir_additions", + "source_code": "@final\ndef _dir_additions(self) -> set[str]:\n additions = super()._dir_additions()\n if self._info_axis._can_hold_strings:\n additions.update(self._info_axis._dir_additions_for_owner)\n return additions", + "docstring": "add the string-like attributes from the info_axis. 
If info_axis is a MultiIndex, its first level values are used.", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:_dir_additions arg:self arguments arg Assign Call Call If Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_remove_dropped_categories", + "source_code": "def _remove_dropped_categories(self, categories, i):\n if self._drop_idx_after_grouping is not None and self._drop_idx_after_grouping[i] is not None:\n return np.delete(categories, self._drop_idx_after_grouping[i])\n return categories", + "docstring": "Remove dropped categories.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py", + "ast_data": "FunctionDef name:_remove_dropped_categories arg:self arg:categories arg:i arguments arg arg arg If BoolOp Compare Compare Return return:yes Call Return return:yes" + }, + { + "library": "scipy", + "name": "Schaffer02", + "source_code": "class Schaffer02(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n self.custom_bounds = [(-10, 10), (-10, 10)]\n self.global_optimum = [[0.0 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n num = sin(x[0] ** 2 - x[1] ** 2) ** 2 - 0.5\n den = (1 + 0.001 * (x[0] ** 2 + x[1] ** 2)) ** 2\n return 0.5 + num / den", + "docstring": "Schaffer 2 objective function. This class defines the Schaffer 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schaffer02}}(x) = 0.5 + \\frac{\\sin^2 (x_1^2 - x_2^2)^2 - 0.5} {1 + 0.001(x_1^2 + x_2^2)^2} with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Mishra, S. Some new test functions for global optimization and performance of repulsive particle swarm method. Munich Personal RePEc Archive, 2006, 2718", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:Schaffer02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_e_step", + "source_code": "def _e_step(self, X, cal_sstats, random_init, parallel=None):\n random_state = self.random_state_ if random_init else None\n n_jobs = effective_n_jobs(self.n_jobs)\n if parallel is None:\n parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))\n results = parallel((delayed(_update_doc_distribution)(X[idx_slice, :], self.exp_dirichlet_component_, self.doc_topic_prior_, self.max_doc_update_iter, self.mean_change_tol, cal_sstats, random_state) for idx_slice in gen_even_slices(X.shape[0], n_jobs)))\n doc_topics, sstats_list = zip(*results)\n doc_topic_distr = np.vstack(doc_topics)\n if cal_sstats:\n suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)\n for sstats in sstats_list:\n suff_stats += sstats\n suff_stats *= self.exp_dirichlet_component_\n else:\n suff_stats = None\n return (doc_topic_distr, suff_stats)", + "docstring": "E-step in EM update. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. cal_sstats : bool Parameter that indicate whether to calculate sufficient statistics or not. 
Set `doc_topic_distrgammasuff_statscal_sstats == False`, it will be None.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py", + "ast_data": "FunctionDef name:_e_step arg:self arg:X arg:cal_sstats arg:random_init arg:parallel arguments arg arg arg arg arg Assign Assign Call If Compare Assign Call Call Assign Call Call Call Call Assign Call Assign Call If Assign Call For Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "ismethod", + "source_code": "def ismethod(object):\n return _inspect.ismethod(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.ismethod.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:ismethod arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "Options", + "source_code": "class Options(object):\n\n def __init__(self, bytes_per_pack=0, timeout_seconds=None, implementation=CommunicationImplementation.AUTO):\n if bytes_per_pack < 0:\n raise ValueError(f'Argument `bytes_per_pack` must be >=0, Received {bytes_per_pack}.')\n if isinstance(implementation, str):\n implementation = CommunicationImplementation(implementation.upper())\n if not isinstance(implementation, CommunicationImplementation):\n raise ValueError('Argument `implementation` must be instance of `tf.distribute.experimental.CommunicationImplementation`.')\n self.bytes_per_pack = bytes_per_pack\n self.timeout_seconds = timeout_seconds\n self.implementation = implementation\n __init__.__doc__ = _OptionsExported.__init__.__doc__\n\n def merge(self, options):\n merged = copy.deepcopy(self)\n if options is None:\n return merged\n if options.bytes_per_pack != 0:\n merged.bytes_per_pack = options.bytes_per_pack\n if options.timeout_seconds is not None:\n merged.timeout_seconds = options.timeout_seconds\n if options.implementation != CommunicationImplementation.AUTO:\n merged.implementation = options.implementation\n return merged\n\n def __str__(self):\n return f'Options(bytes_per_pack={self.bytes_per_pack},timeout_seconds={self.timeout_seconds}, implementation={self.implementation})'", + "docstring": "Implementation of OptionsInterface.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py", + "ast_data": "ClassDef name:Options FunctionDef name:__init__ arg:self arg:bytes_per_pack arg:timeout_seconds arg:implementation arguments arg arg arg arg If Compare Raise Call If Call Assign Call Call If Call Raise Call Assign Assign Assign Assign FunctionDef name:merge arg:self arg:options arguments arg arg Assign Call If Compare Return return:yes If Compare Assign If Compare Assign If Compare Assign Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "shape", + "source_code": "@property\ndef shape(self):\n return self._shape", + "docstring": "Shape of the covariance array", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_covariance.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "__call__", + "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n X = np.atleast_2d(X)\n length_scale = _check_length_scale(X, self.length_scale)\n if Y is None:\n dists = pdist(X / length_scale, metric='sqeuclidean')\n K = np.exp(-0.5 * dists)\n K = squareform(K)\n np.fill_diagonal(K, 1)\n else:\n 
if eval_gradient:\n raise ValueError('Gradient can only be evaluated when Y is None.')\n dists = cdist(X / length_scale, Y / length_scale, metric='sqeuclidean')\n K = np.exp(-0.5 * dists)\n if eval_gradient:\n if self.hyperparameter_length_scale.fixed:\n return (K, np.empty((X.shape[0], X.shape[0], 0)))\n elif not self.anisotropic or length_scale.shape[0] == 1:\n K_gradient = (K * squareform(dists))[:, :, np.newaxis]\n return (K, K_gradient)\n elif self.anisotropic:\n K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / length_scale ** 2\n K_gradient *= K[..., np.newaxis]\n return (K, K_gradient)\n else:\n return K", + "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : ndarray of shape (n_samples_Y, n_features), default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Call If Raise Call Assign Call Assign Call If If Return return:yes Call If BoolOp Compare Assign Call Return return:yes If Assign Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "nsmallest", + "source_code": "def nsmallest(self, n: int=5, keep: Literal['first', 'last', 'all']='first') -> Series:\n return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest()", + "docstring": "Return the smallest elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of elements: - `nnnnnnnnnkeepnn` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep=\"all\") Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:nsmallest arg:self arg:n arg:keep arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "cx_left", + "source_code": "@property\ndef cx_left(self) -> Tensor:\n return self.rectified_left_camera[..., 0, 2]", + "docstring": "Return the x-coordinate of the principal point for the left camera. 
Returns: tensor of shape :math:", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py", + "ast_data": "FunctionDef name:cx_left arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_yeo_johnson_optimize", + "source_code": "def _yeo_johnson_optimize(self, x):\n x_tiny = np.finfo(np.float64).tiny\n\n def _neg_log_likelihood(lmbda):\n x_trans = self._yeo_johnson_transform(x, lmbda)\n n_samples = x.shape[0]\n x_trans_var = x_trans.var()\n if x_trans_var < x_tiny:\n return np.inf\n log_var = np.log(x_trans_var)\n loglike = -n_samples / 2 * log_var\n loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()\n return -loglike\n x = x[~np.isnan(x)]\n return _yeojohnson_lambda(_neg_log_likelihood, x)", + "docstring": "Find and return optimal lambda parameter of the Yeo-Johnson transform by MLE, for observed data x. Like for Box-Cox, MLE is done via the brent optimizer.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:_yeo_johnson_optimize arg:self arg:x arguments arg arg Assign Call FunctionDef name:_neg_log_likelihood arg:lmbda arguments arg Assign Call Assign Assign Call If Compare Return return:yes Assign Call Assign Call Call Call Call Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "index", + "source_code": "@cherrypy.expose\ndef index(self):\n return 'Hello world!'", + "docstring": "Produce HTTP response body of hello world app index URI.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut01_helloworld.py", + "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "key_of", + "source_code": "@staticmethod\ndef key_of(node):\n sizevars = V.graph.sizevars\n return (node.get_device().type, str(node.get_dtype()), *sizevars.size_hints(node.get_size(), fallback=config.unbacked_symint_fallback), *sizevars.size_hints(node.get_stride(), fallback=config.unbacked_symint_fallback), sizevars.size_hint(node.get_layout().offset, fallback=config.unbacked_symint_fallback))", + "docstring": "Extract the pieces of an ir.Buffer that we should invalidate cached autotuning results on.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", + "ast_data": "FunctionDef name:key_of arg:node arguments arg Assign Return return:yes Call Call Call Call Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "fix_invalid", + "source_code": "def fix_invalid(a, mask=nomask, copy=True, fill_value=None):\n a = masked_array(a, copy=copy, mask=mask, subok=True)\n invalid = np.logical_not(np.isfinite(a._data))\n if not invalid.any():\n return a\n a._mask |= invalid\n if fill_value is None:\n fill_value = a.fill_value\n a._data[invalid] = fill_value\n return a", + "docstring": "Return input with invalid data masked and replaced by a fill value. Invalid data means values of , , etc. Parameters ---------- a : array_like Input array, a (subclass of) ndarray. mask : sequence, optional Mask. Must be convertible to an array of booleans with the same shape as . True indicates a masked (i.e. invalid) data. copy : bool, optional Whether to use a copy of (True) or to fix in place (False). Default is True. fill_value : scalar, optional Value used for fixing invalid data. Default is None, in which case the `` is used. Returns ------- b : MaskedArray The input array with invalid entries fixed. 
Notes ----- A copy is performed by default. Examples -------- >>> import numpy as np >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) >>> x masked_array(data=[--, -1.0, nan, inf], mask=[ True, False, False, False], fill_value=1e+20) >>> np.ma.fix_invalid(x) masked_array(data=[--, -1.0, --, --], mask=[ True, False, True, True], fill_value=1e+20) >>> fixed = np.ma.fix_invalid(x) >>> fixed.data array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) >>> x.data array([ 1., -1., nan, inf])", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:fix_invalid arg:a arg:mask arg:copy arg:fill_value arguments arg arg arg arg Assign Call Assign Call Call If Call Return return:yes If Compare Assign Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "check_username_and_password", + "source_code": "def check_username_and_password(self, username, password):\n pass", + "docstring": "Assert the login credentials. :param username: A user name sent from the login form. :type username: str :param password: A pass word sent from the login form. :type password: str :returns: A non-empty error string if the authentication fails. :rtype: str", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", + "ast_data": "FunctionDef name:check_username_and_password arg:self arg:username arg:password arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "get_trtengineop_node_op_count", + "source_code": "def get_trtengineop_node_op_count(graphdef, node_name):\n ops_in_engine = collections.defaultdict(int)\n for func in graphdef.library.function:\n if f'{node_name}_native_segment' == func.signature.name:\n node_count = len(func.node_def)\n for node in func.node_def:\n ops_in_engine[node.op] += 1\n break\n return (node_count, ops_in_engine)", + "docstring": "Counts the number of nodes and OP types of a given TRTEngineOp.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py", + "ast_data": "FunctionDef name:get_trtengineop_node_op_count arg:graphdef arg:node_name arguments arg arg Assign Call For If Compare Assign Call For Return return:yes" + }, + { + "library": "tensorflow", + "name": "value_type", + "source_code": "@property\ndef value_type(self):\n return Tensor", + "docstring": "The Python type for values that are compatible with this TypeSpec.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "FunctionDef name:value_type arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "_copy_permissions", + "source_code": "@staticmethod\ndef _copy_permissions(mode, filename):\n if mode & stat.S_IROTH:\n os.chmod(filename, mode)", + "docstring": "If the file in the archive has some permissions (this assumes a file won't be writable/executable without being readable), apply those permissions to the unarchived file.", + "type": "method", + "file_path": "django\\django\\utils\\archive.py", + "ast_data": "FunctionDef name:_copy_permissions arg:mode arg:filename arguments arg arg If Call" + }, + { + "library": "pandas", + "name": "_to_ijv", + "source_code": "def _to_ijv(ss, row_levels: tuple[int] | list[int]=(0,), column_levels: tuple[int] | list[int]=(1,), sort_labels: bool=False) -> tuple[np.ndarray, npt.NDArray[np.intp], npt.NDArray[np.intp], list[IndexLabel], list[IndexLabel]]:\n _check_is_partition([row_levels, column_levels], range(ss.index.nlevels))\n sp_vals = ss.array.sp_values\n na_mask = 
notna(sp_vals)\n values = sp_vals[na_mask]\n valid_ilocs = ss.array.sp_index.indices[na_mask]\n i_coords, i_labels = _levels_to_axis(ss, row_levels, valid_ilocs, sort_labels=sort_labels)\n j_coords, j_labels = _levels_to_axis(ss, column_levels, valid_ilocs, sort_labels=sort_labels)\n return (values, i_coords, j_coords, i_labels, j_labels)", + "docstring": "For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo constructor, and ilabels and jlabels are the row and column labels respectively. Parameters ---------- ss : Series row_levels : tuple/list column_levels : tuple/list sort_labels : bool, default False Sort the row and column labels before forming the sparse matrix. When and/or refer to a single level, set to for a faster execution. Returns ------- values : numpy.ndarray Valid values to populate a sparse matrix, extracted from ss. i_coords : numpy.ndarray (row coordinates of the values) j_coords : numpy.ndarray (column coordinates of the values) i_labels : list (row labels) j_labels : list (column labels)", + "type": "function", + "file_path": "pandas\\pandas\\core\\arrays\\sparse\\scipy_sparse.py", + "ast_data": "FunctionDef name:_to_ijv arg:ss arg:row_levels arg:column_levels arg:sort_labels arguments arg arg arg arg Call Call Assign Assign Call Assign Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "stop", + "source_code": "def stop(self) -> Deferred[Any]:\n return self._stop()", + "docstring": "Stops simultaneously all the crawling jobs taking place. Returns a deferred that is fired when they all have ended.", + "type": "method", + "file_path": "scrapy\\scrapy\\crawler.py", + "ast_data": "FunctionDef name:stop arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "object_identifier", + "source_code": "@abc.abstractproperty\ndef object_identifier(self):\n raise NotImplementedError", + "docstring": "String stored in object identifier field in the SavedModel proto. 
Returns: A string with the object identifier, which is used at load time.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py", + "ast_data": "FunctionDef name:object_identifier arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "_dense_var_to_tensor", + "source_code": "def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n if values_util.is_saving_non_distributed():\n return ops.convert_to_tensor(self._primary, dtype=dtype, name=name, as_ref=as_ref)\n with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):\n return ops.convert_to_tensor(self._get(), dtype=dtype, name=name, as_ref=as_ref)", + "docstring": "Converts a variable to a tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:_dense_var_to_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Call Return return:yes Call With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "update_if_finite_grads", + "source_code": "def update_if_finite_grads():\n\n def incr_loss_scale():\n new_loss_scale = self.current_loss_scale * self.multiplier\n return control_flow_ops.group(_assign_if_finite(self.current_loss_scale, new_loss_scale), self.counter.assign(0))\n return cond.cond(self.counter + 1 >= self.growth_steps, incr_loss_scale, lambda: _op_in_graph_mode(self.counter.assign_add(1)))", + "docstring": "Update assuming the gradients are finite.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:update_if_finite_grads arguments FunctionDef name:incr_loss_scale arguments Assign Return return:yes Call Call Call Return return:yes Call Compare arguments Call Call" + }, + { + "library": "tensorflow", + "name": "recreate_saveable_objects", + "source_code": "def recreate_saveable_objects(saveable_fn_by_name, temp_session):\n names_and_slices = []\n with ops.init_scope():\n for save_fn, _ in saveable_fn_by_name.values():\n for tensor_info in save_fn(''):\n name = tensor_info['name']\n slice_spec = tensor_info['slice_spec']\n if not context.executing_eagerly():\n sess = ops.get_default_session()\n if sess is None:\n if temp_session[0] is not None:\n sess = temp_session[0]\n else:\n sess = temp_session[0] = session.Session()\n name, slice_spec = sess.run([name, slice_spec])\n names_and_slices.append((_convert_to_string(name), _convert_to_string(slice_spec)))\n saveable_factories = {}\n for name, (save_fn, restore_fn) in saveable_fn_by_name.items():\n saveable_factories[name] = functools.partial(RestoredSaveableObject, names_and_slices=names_and_slices, save_function=save_fn, restore_function=restore_fn)\n return saveable_factories", + "docstring": "Returns a dict of SaveableObject factories generated from loaded fns.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "FunctionDef name:recreate_saveable_objects arg:saveable_fn_by_name arg:temp_session arguments arg arg Assign With Call For Call For Call Assign Assign If Call Assign Call If Compare If Compare Assign Assign Call Assign Call Call Call Call Assign For Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_proj_type", + "source_code": "def set_proj_type(self, proj_type, focal_length=None):\n _api.check_in_list(['persp', 'ortho'], 
proj_type=proj_type)\n if proj_type == 'persp':\n if focal_length is None:\n focal_length = 1\n elif focal_length <= 0:\n raise ValueError(f'focal_length = {focal_length} must be greater than 0')\n self._focal_length = focal_length\n else:\n if focal_length not in (None, np.inf):\n raise ValueError(f'focal_length = {focal_length} must be None for proj_type = {proj_type}')\n self._focal_length = np.inf", + "docstring": "Set the projection type. Parameters ---------- proj_type : {'persp', 'ortho'} The projection type. focal_length : float, default: None For a projection type of 'persp', the focal length of the virtual camera. Must be > 0. If None, defaults to 1. The focal length can be computed from a desired Field Of View via the equation: focal_length = 1/tan(FOV/2)", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:set_proj_type arg:self arg:proj_type arg:focal_length arguments arg arg arg Call If Compare If Compare Assign If Compare Raise Call Assign If Compare Raise Call Assign" + }, + { + "library": "django", + "name": "tuple", + "source_code": "@property\ndef tuple(self):\n return self._cs.tuple", + "docstring": "Return a tuple of the point.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\point.py", + "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "predict", + "source_code": "@torch.no_grad()\ndef predict(self, keypoints: Optional[Keypoints | Tensor]=None, keypoints_labels: Optional[Tensor]=None, boxes: Optional[Boxes | Tensor]=None, masks: Optional[Tensor]=None, multimask_output: bool=True, output_original_size: bool=True) -> SegmentationResults:\n KORNIA_CHECK(self.is_image_set, 'An image must be set with `self.set_image(...)` before `predict` be called!')\n prompts = self.preprocess_prompts(keypoints, keypoints_labels, boxes, masks)\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=prompts.points, boxes=prompts.boxes, masks=prompts.masks)\n del prompts\n logits, scores = self.model.mask_decoder(image_embeddings=self.image_embeddings, image_pe=self.model.prompt_encoder.get_dense_pe(), sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output)\n results = SegmentationResults(logits, scores)\n if output_original_size and isinstance(self._input_image_size, tuple) and isinstance(self._original_image_size, tuple):\n results.original_res_logits(self._input_image_size, self._original_image_size, self._input_encoder_size)\n return results", + "docstring": "Predict masks for the given image based on the input prompts. Args: keypoints: Point prompts to the model. Each point is in (X,Y) in pixels. Shape :math:. Where is the number of points and the number of prompts. keypoints_labels: Labels for the point prompts. 1 indicates a foreground point and 0 indicates a background point. Shape :math:. Where is the number of points, and the number of prompts. boxes: A box prompt to the model. If a tensor, should be in a xyxy mode. Shape :math: masks: A low resolution mask input to the model, typically coming from a previous prediction iteration. Has shape :math:, where for SAM, H=W=256. multimask_output: If true, the model will return three masks. For ambiguous input prompts (such as a single click), this will often produce better masks than a single prediction. 
If only a single mask is needed, the model's predicted quality score can be used to select the best mask. For non-ambiguous prompts, such as multiple input prompts, multimask_output=False can give better results. output_original_size: If true, the logits of will be post-processed to match the original input image size. Returns: A prediction with the logits and scores (IoU of each predicted mask)", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\visual_prompter.py", + "ast_data": "FunctionDef name:predict arg:self arg:keypoints arg:keypoints_labels arg:boxes arg:masks arg:multimask_output arg:output_original_size arguments arg arg arg arg arg arg arg Call Assign Call Assign Call Assign Call Call Assign Call If BoolOp Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "devices", + "source_code": "def devices(self):\n return self._devices", + "docstring": "List of the names of devices available to execute operations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:devices arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "allreduce_hook", + "source_code": "def allreduce_hook(state: DefaultState, grad: torch.Tensor):\n if state.gradient_predivide_factor > 1:\n grad.div_(state.gradient_predivide_factor)\n dist.all_reduce(grad, group=state.process_group)\n if state.gradient_postdivide_factor > 1:\n grad.div_(state.gradient_postdivide_factor)", + "docstring": "Implement the FSDP communication hook for `` algorithm and a necessary pre- and post-division of gradients. Args: state (DefaultState): State information, configures pre- and post-division factors. grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\algorithms\\_comm_hooks\\default_hooks.py", + "ast_data": "FunctionDef name:allreduce_hook arg:state arg:grad arguments arg arg If Compare Call Call If Compare Call" + }, + { + "library": "django", + "name": "num_pages", + "source_code": "@cached_property\ndef num_pages(self):\n if self.count == 0 and (not self.allow_empty_first_page):\n return 0\n hits = max(1, self.count - self.orphans)\n return ceil(hits / self.per_page)", + "docstring": "Return the total number of pages.", + "type": "method", + "file_path": "django\\django\\core\\paginator.py", + "ast_data": "FunctionDef name:num_pages arg:self arguments arg If BoolOp Compare Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "generate_all_int_dyn_dim_possibilities", + "source_code": "def generate_all_int_dyn_dim_possibilities(my_list: list[DVar]):\n eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))]\n neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))]\n d_possibilities = [list(i) for i in zip(eq_possibilities, neq_possibilities)]\n all_possibilities = list(itertools.product(*d_possibilities))\n return all_possibilities", + "docstring": "Generate all possibilities of being equal or not equal to dyn for my_list Args: my_list: List of tensor dimensions Returns: A list of a list of constraints. 
Each list of constraints corresponds to one possibility about the values of the dimension variables", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", + "ast_data": "FunctionDef name:generate_all_int_dyn_dim_possibilities arg:my_list arguments arg Assign Call Call Call Assign Call Call Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "while_loop_op", + "source_code": "@staticmethod\ndef while_loop_op(op):\n return control_flow_util.IsLoopSwitch(op) or control_flow_util.IsLoopMerge(op) or control_flow_util.IsLoopEnter(op) or control_flow_util.IsLoopExit(op) or TensorTracer.loop_cond_op(op) or (op.type in ('RefNextIteration', 'NextIteration'))", + "docstring": "Returns true if op is one of the special ops of in a while loop. Args: op: A tf.Operation. Returns: True if the given op is one of [Switch, Merge, Enter, Exit, NextIteration, LoopCond], which are all building blocks for TF while loops.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:while_loop_op arg:op arguments arg Return return:yes BoolOp Call Call Call Call Call Compare" + }, + { + "library": "sphinx", + "name": "Index", + "source_code": "class Index(ABC):\n name: str\n localname: str\n shortname: str | None = None\n\n def __init__(self, domain: Domain) -> None:\n if not self.name or self.localname is None:\n msg = f'Index subclass {self.__class__.__name__} has no valid name or localname'\n raise SphinxError(msg)\n self.domain = domain\n\n @abstractmethod\n def generate(self, docnames: Iterable[str] | None=None) -> tuple[list[tuple[str, list[IndexEntry]]], bool]:\n raise NotImplementedError", + "docstring": "An Index is the description for a domain-specific index. To add an index to a domain, subclass Index, overriding the three name attributes: * is an identifier used for generating file names. It is also used for a hyperlink target for the index. Therefore, users can refer the index page using `py-modindex``localnameshortnamegenerateindices~sphinx.application.Sphinx.add_index_to_domainref` role.", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\_index.py", + "ast_data": "ClassDef name:Index FunctionDef name:__init__ arg:self arg:domain arguments arg arg If BoolOp Compare Assign Raise Call Assign FunctionDef name:generate arg:self arg:docnames arguments arg arg Raise" + }, + { + "library": "tensorflow", + "name": "_broadcast_shape_helper", + "source_code": "def _broadcast_shape_helper(shape_x, shape_y):\n broadcasted_dims = reversed(list(itertools.zip_longest(reversed(shape_x.dims), reversed(shape_y.dims), fillvalue=tensor_shape.Dimension(1))))\n return_dims = []\n for dim_x, dim_y in broadcasted_dims:\n if dim_x.value is None or dim_y.value is None:\n if dim_x.value is not None and dim_x.value > 1:\n return_dims.append(dim_x)\n elif dim_y.value is not None and dim_y.value > 1:\n return_dims.append(dim_y)\n else:\n return_dims.append(None)\n elif dim_x.value == 1:\n return_dims.append(dim_y)\n elif dim_y.value == 1:\n return_dims.append(dim_x)\n elif dim_x.value == dim_y.value:\n return_dims.append(dim_x.merge_with(dim_y))\n else:\n return None\n return return_dims", + "docstring": "Helper functions for is_broadcast_compatible and broadcast_shape. 
Args: shape_x: A shape_y: A Returns: Returns None if the shapes are not broadcast compatible, a list of the broadcast dimensions otherwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\common_shapes.py", + "ast_data": "FunctionDef name:_broadcast_shape_helper arg:shape_x arg:shape_y arguments arg arg Assign Call Call Call Call Call Call Assign For If BoolOp Compare Compare If BoolOp Compare Compare Call If BoolOp Compare Compare Call Call If Compare Call If Compare Call If Compare Call Call Return return:no Return return:yes" + }, + { + "library": "scipy", + "name": "complete", + "source_code": "@lazy_cython\ndef complete(y):\n return linkage(y, method='complete', metric='euclidean')", + "docstring": "Perform complete/max/farthest point linkage on a condensed distance matrix. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of `linkagescipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.", + "type": "function", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:complete arg:y arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "export", + "source_code": "def export(fn):\n fn._torchscript_modifier = FunctionModifiers.EXPORT\n return fn", + "docstring": "This decorator indicates that a method on an `ScriptModuleforward@torch.jit.exportimplicitly_compiled_methodmforwardanother_forwardimplicitly_compiled_methodunused_method@torch.jit.export` m = torch.jit.script(MyModule())", + "type": "function", + "file_path": "pytorch\\torch\\_jit_internal.py", + "ast_data": "FunctionDef name:export arg:fn arguments arg Assign Return return:yes" + }, + { + "library": "pandas", + "name": "sum", + "source_code": "def sum(self, axis: AxisInt=0, min_count: int=0, skipna: bool=True, *args, **kwargs) -> Scalar:\n nv.validate_sum(args, kwargs)\n valid_vals = self._valid_sp_values\n sp_sum = valid_vals.sum()\n has_na = self.sp_index.ngaps > 0 and (not self._null_fill_value)\n if has_na and (not skipna):\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n if self._null_fill_value:\n if check_below_min_count(valid_vals.shape, None, min_count):\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n return sp_sum\n else:\n nsparse = self.sp_index.ngaps\n if check_below_min_count(valid_vals.shape, None, min_count - nsparse):\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n return sp_sum + self.fill_value * nsparse", + "docstring": "Sum of non-NA/null values Parameters ---------- axis : int, default 0 Not Used. NumPy compatibility. min_count : int, default 0 The required number of valid values to perform the summation. If fewer than `` valid values are present, the result will be the missing value indicator for subarray type. *args, **kwargs Not Used. NumPy compatibility. 
Returns ------- scalar", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py", + "ast_data": "FunctionDef name:sum arg:self arg:axis arg:min_count arg:skipna arguments arg arg arg arg arg arg Call Assign Assign Call Assign BoolOp Compare If BoolOp Return return:yes Call If If Call Return return:yes Call Return return:yes Assign If Call Return return:yes Call Return return:yes" + }, + { + "library": "numpy", + "name": "lagfit", + "source_code": "def lagfit(x, y, deg, rcond=None, full=False, w=None):\n return pu._fit(lagvander, x, y, deg, rcond, full, w)", + "docstring": "Least squares fit of Laguerre series to data. Return the coefficients of a Laguerre series of degree that is the least squares fit to the data values given at points . If is 1-D the returned coefficients will also be 1-D. If is 2-D multiple fits are done, one for each column of , and the resulting coefficients are stored in the corresponding columns of a 2-D return. The fitted polynomial(s) are in the form .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), where `degdegdegMyyrcondnumpy.linalg.lstsqw_jxwyV~exceptions.RankWarningrcondlagweight`. References ---------- .. [1] Wikipedia, \"Curve fitting\", Examples -------- >>> import numpy as np >>> from numpy.polynomial.laguerre import lagfit, lagval >>> x = np.linspace(0, 10) >>> rng = np.random.default_rng() >>> err = rng.normal(scale=1./10, size=len(x)) >>> y = lagval(x, [1, 2, 3]) + err >>> lagfit(x, y, 2) array([1.00578369, 1.99417356, 2.99827656]) # may vary", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\laguerre.py", + "ast_data": "FunctionDef name:lagfit arg:x arg:y arg:deg arg:rcond arg:full arg:w arguments arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_max_in_bounds", + "source_code": "def _max_in_bounds(self, max):\n if max >= self.valmax:\n if not self.closedmax:\n return self.val[1]\n max = self.valmax\n if max <= self.val[0]:\n max = self.val[0]\n return self._stepped_value(max)", + "docstring": "Ensure the new max value is between valmax and self.val[0].", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:_max_in_bounds arg:self arg:max arguments arg arg If Compare If Return return:yes Assign If Compare Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "split_arg_into_blocks", + "source_code": "def split_arg_into_blocks(block_dims, block_dims_fn, arg, axis=-1):\n block_sizes = [dim.value for dim in block_dims]\n if any((d is None for d in block_sizes)):\n block_sizes = block_dims_fn()\n return array_ops.split(arg, block_sizes, axis=axis)", + "docstring": "Split into blocks matching 's . Specifically, if we have a blockwise lower-triangular matrix, with block sizes along the diagonal , this method splits on into tensors, whose shape at is . Args: block_dims: Iterable of . block_dims_fn: Callable returning an iterable of s. arg: . is split into tensors. axis: Python representing the axis to split on. 
Returns: A list of s.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py", + "ast_data": "FunctionDef name:split_arg_into_blocks arg:block_dims arg:block_dims_fn arg:arg arg:axis arguments arg arg arg arg Assign If Call Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "state_dict", + "source_code": "def state_dict(self) -> dict[str, Any]:\n state_dict = self._optimizer.state_dict()\n param_groups = state_dict['param_groups']\n ret_state = {self.ordered_param_keys[st_key]: state_val for st_key, state_val in state_dict['state'].items()}\n ret_groups = []\n for group in param_groups:\n param_keys = [self.ordered_param_keys[param] for param in group['params']]\n ret_group = {'params': sorted(param_keys)}\n for k, v in group.items():\n if k != 'params':\n ret_group[k] = deepcopy(v)\n ret_groups.append(ret_group)\n return self._post_state_dict({'state': ret_state, 'param_groups': ret_groups})", + "docstring": "Return the `` of the optimizer. Instead of using number to index parameters, we will use module fully qualified name (FQN) as the key.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\named_optimizer.py", + "ast_data": "FunctionDef name:state_dict arg:self arguments arg Assign Call Assign Assign Call Assign For Assign Assign Call For Call If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "sequence_categorical_column_with_vocabulary_file", + "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.sequence_categorical_column_with_vocabulary_file')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef sequence_categorical_column_with_vocabulary_file(key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0, default_value=None, dtype=dtypes.string):\n return fc.SequenceCategoricalColumn(fc.categorical_column_with_vocabulary_file(key=key, vocabulary_file=vocabulary_file, vocabulary_size=vocabulary_size, num_oov_buckets=num_oov_buckets, default_value=default_value, dtype=dtype))", + "docstring": "A sequence of categorical terms where ids use a vocabulary file. Pass this to or to convert sequence categorical data into dense representation for input to sequence NN, such as RNN. Example: Args: key: A unique string identifying the input feature. vocabulary_file: The vocabulary file name. vocabulary_size: Number of the elements in the vocabulary. This must be no greater than length of , if less than length, later values are ignored. If None, it is set to the length of . num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range based on a hash of the input value. A positive can not be specified with . default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to . This can not be specified with a positive . dtype: The type of features. Only string and integer types are supported. Returns: A . Raises: ValueError: is missing or cannot be opened. ValueError: is missing or < 1. ValueError: is a negative integer. ValueError: and are both specified. 
ValueError: is neither string nor integer.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py", + "ast_data": "FunctionDef name:sequence_categorical_column_with_vocabulary_file arg:key arg:vocabulary_file arg:vocabulary_size arg:num_oov_buckets arg:default_value arg:dtype arguments arg arg arg arg arg arg Return return:yes Call Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "default_device", + "source_code": "def default_device(self):\n return torch.device('cpu')", + "docstring": "The default device used for new PyTorch arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : Device The default device used for new PyTorch arrays. Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_device() device(type='cpu') Notes ----- This method returns the static default device when PyTorch is initialized. However, the *current* device used by creation functions (`` etc.) can be changed at runtime. See Also --------", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\torch\\_info.py", + "ast_data": "FunctionDef name:default_device arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "quote", + "source_code": "def quote(s):\n return s.translate(QUOTE_MAP) if isinstance(s, str) else s", + "docstring": "Ensure that primary key values do not confuse the admin URLs by escaping any '/', '_' and ':' and similarly problematic characters. Similar to urllib.parse.quote(), except that the quoting is slightly different so that it doesn't get automatically unquoted by the web browser.", + "type": "function", + "file_path": "django\\django\\contrib\\admin\\utils.py", + "ast_data": "FunctionDef name:quote arg:s arguments arg Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "_make_content_disposition", + "source_code": "def _make_content_disposition(disposition, file_name):\n ascii_name = unicodedata.normalize('NFKC', file_name).encode('ascii', errors='ignore').decode()\n header = '{}; filename=\"{}\"'.format(disposition, ascii_name)\n if ascii_name != file_name:\n quoted_name = urllib.parse.quote(file_name)\n header += \"; filename*=UTF-8''{}\".format(quoted_name)\n return header", + "docstring": "Create HTTP header for downloading a file with a UTF-8 filename. This function implements the recommendations of :rfc:. See this and related answers:", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\static.py", + "ast_data": "FunctionDef name:_make_content_disposition arg:disposition arg:file_name arguments arg arg Assign Call Call Call Assign Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "master_target", + "source_code": "@property\ndef master_target(self):\n return self._master_target", + "docstring": "Returns the session master for the corresponding task to connect to.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py", + "ast_data": "FunctionDef name:master_target arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "yuv422_to_rgb", + "source_code": "def yuv422_to_rgb(imagey: Tensor, imageuv: Tensor) -> Tensor:\n if not isinstance(imagey, Tensor):\n raise TypeError(f'Input type is not a Tensor. 
Got {type(imagey)}')\n if not isinstance(imageuv, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(imageuv)}')\n if len(imagey.shape) < 3 or imagey.shape[-3] != 1:\n raise ValueError(f'Input imagey size must have a shape of (*, 1, H, W). Got {imagey.shape}')\n if len(imageuv.shape) < 3 or imageuv.shape[-3] != 2:\n raise ValueError(f'Input imageuv size must have a shape of (*, 2, H, W/2). Got {imageuv.shape}')\n if len(imagey.shape) < 2 or imagey.shape[-2] % 2 == 1 or imagey.shape[-1] % 2 == 1:\n raise ValueError(f'Input H&W must be evenly disible by 2. Got {imagey.shape}')\n if len(imageuv.shape) < 2 or len(imagey.shape) < 2 or imagey.shape[-1] / imageuv.shape[-1] != 2:\n raise ValueError(f'Input imageuv W must be half the size of the luma plane. Got {imagey.shape} and {imageuv.shape}')\n yuv444image = torch.cat([imagey, imageuv.repeat_interleave(2, dim=-1)], dim=-3)\n return yuv_to_rgb(yuv444image)", + "docstring": "Convert an YUV422 image to RGB. Input need to be padded to be evenly divisible by 2 vertical. The image data is assumed to be in the range of :math: for luma (Y). The ranges of U and V are :math: and :math:, respectively. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Args: imagey: Y (luma) Image plane to be converted to RGB with shape :math:. imageuv: UV (luma) Image planes to be converted to RGB with shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> inputy = torch.rand(2, 1, 4, 6) >>> inputuv = torch.rand(2, 2, 2, 3) >>> output = yuv420_to_rgb(inputy, inputuv) # 2x3x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\yuv.py", + "ast_data": "FunctionDef name:yuv422_to_rgb arg:imagey arg:imageuv arguments arg arg If Call Raise Call Call If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Compare Raise Call If BoolOp Compare Call Compare Call Compare Raise Call Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n check_is_fitted(self)\n X = validate_data(self, X, accept_sparse=('csr', 'csc'), reset=False)\n K = self._get_kernel(X, self.X_fit_)\n return np.dot(K, self.dual_coef_)", + "docstring": "Predict using the kernel ridge model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. If kernel == \"precomputed\" this is instead a precomputed kernel matrix, shape = [n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for this estimator. 
Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_targets) Returns predicted values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\kernel_ridge.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "cryptography", + "name": "public_bytes", + "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n pass", + "docstring": "The serialized bytes of the public key.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py", + "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "dtype", + "source_code": "@property\ndef dtype(self):\n return self._dtype", + "docstring": "The of s handled by this .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "module_inputs", + "source_code": "def module_inputs(self) -> Sequence[torch.fx.Node]:\n nodes = list(self.fx_nodes())\n assert len(nodes) > 0, 'Cannot extract module inputs from empty nodes.'\n module_inputs: dict[torch.fx.Node, None] = {}\n node_set: set[torch.fx.Node] = set(nodes)\n\n def _extract_arg_if_node_outside_module(arg: Any):\n if isinstance(arg, torch.fx.Node) and arg not in node_set:\n module_inputs[arg] = None\n for node in nodes:\n pytree.tree_map(_extract_arg_if_node_outside_module, node.args)\n pytree.tree_map(_extract_arg_if_node_outside_module, node.kwargs)\n return list(module_inputs.keys())", + "docstring": "Extract module inputs from the sequence of fx nodes this instance holds. All node args that are produced by nodes outside of the module are considered module inputs. The order of returned module inputs is the same as the their use order. ### Known limitations The original ordering of module inputs is not preserved. There is no meta information to be found from the that can be used to recover the original ordering. 
Returns: Sequence of module inputs.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", + "ast_data": "FunctionDef name:module_inputs arg:self arguments arg Assign Call Call Compare Call Call FunctionDef name:_extract_arg_if_node_outside_module arg:arg arguments arg If BoolOp Call Compare Assign For Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "graph_parents", + "source_code": "@property\n@deprecation.deprecated(None, 'Do not call `graph_parents`.')\ndef graph_parents(self):\n return self._graph_parents", + "docstring": "List of graph dependencies of this .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:graph_parents arg:self arguments arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "add_gutters", + "source_code": "def add_gutters(self, points, center, trans_fwd, trans_inv):\n half_width = self.width / 2\n low_gutter = trans_inv(trans_fwd(center) - half_width)\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n high_gutter = trans_inv(trans_fwd(center) + half_width)\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = '{:.1%} of the points cannot be placed; you may want to decrease the size of the markers or use stripplot.'.format(gutter_prop)\n warnings.warn(msg, UserWarning)\n return points", + "docstring": "Stop points from extending beyond their territory.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:add_gutters arg:self arg:points arg:center arg:trans_fwd arg:trans_inv arguments arg arg arg arg arg Assign Assign Call Call Assign Compare If Call Assign Assign Call Call Assign Compare If Call Assign Assign Call Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "add_patch", + "source_code": "def add_patch(self, p):\n _api.check_isinstance(mpatches.Patch, p=p)\n self._set_artist_props(p)\n if p.get_clip_path() is None:\n p.set_clip_path(self.patch)\n self._update_patch_limits(p)\n self._children.append(p)\n p._remove_method = self._children.remove\n return p", + "docstring": "Add a to the Axes; return the patch.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:add_patch arg:self arg:p arguments arg arg Call Call If Compare Call Call Call Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_check_shape", + "source_code": "def _check_shape(param, param_shape, name):\n param = np.array(param)\n if param.shape != param_shape:\n raise ValueError(\"The parameter '%s' should have the shape of %s, but got %s\" % (name, param_shape, param.shape))", + "docstring": "Validate the shape of the input parameter 'param'. 
Parameters ---------- param : array param_shape : tuple name : str", + "type": "function", + "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", + "ast_data": "FunctionDef name:_check_shape arg:param arg:param_shape arg:name arguments arg arg arg Assign Call If Compare Raise Call" + }, + { + "library": "numpy", + "name": "parse_targets", + "source_code": "def parse_targets(self, source):\n self.dist_log(\"looking for '@targets' inside -> \", source)\n with open(source) as fd:\n tokens = ''\n max_to_reach = 1000\n start_with = '@targets'\n start_pos = -1\n end_with = '*/'\n end_pos = -1\n for current_line, line in enumerate(fd):\n if current_line == max_to_reach:\n self.dist_fatal('reached the max of lines')\n break\n if start_pos == -1:\n start_pos = line.find(start_with)\n if start_pos == -1:\n continue\n start_pos += len(start_with)\n tokens += line\n end_pos = line.find(end_with)\n if end_pos != -1:\n end_pos += len(tokens) - len(line)\n break\n if start_pos == -1:\n self.dist_fatal(\"expected to find '%s' within a C comment\" % start_with)\n if end_pos == -1:\n self.dist_fatal(\"expected to end with '%s'\" % end_with)\n tokens = tokens[start_pos:end_pos]\n return self._parse_target_tokens(tokens)", + "docstring": "Fetch and parse configuration statements that required for defining the targeted CPU features, statements should be declared in the top of source in between **C** comment and start with a special mark **@targets**. Configuration statements are sort of keywords representing CPU features names, group of statements and policies, combined together to determine the required optimization. Parameters ---------- source : str the path of **C** source file. Returns ------- - bool, True if group has the 'baseline' option - list, list of CPU features - list, list of extra compiler flags", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", + "ast_data": "FunctionDef name:parse_targets arg:self arg:source arguments arg arg Call With Call Assign Assign Assign Assign Assign Assign For Call If Compare Call If Compare Assign Call If Compare Call Assign Call If Compare Call Call If Compare Call If Compare Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "inserting_after", + "source_code": "@compatibility(is_backward_compatible=True)\ndef inserting_after(self, n: Optional[Node]=None):\n if n is None:\n return self.inserting_before(self._root)\n assert n.graph == self, 'Node to insert after is not in graph.'\n return _InsertPoint(self, n.append)", + "docstring": "Set the point at which create_node and companion methods will insert into the graph. When used within a 'with' statement, this will temporary set the insert point and then restore it when the with statement exits:: with g.inserting_after(n): ... # inserting after node n ... # insert point restored to what it was previously g.inserting_after(n) # set the insert point permanently Args: n (Optional[Node]): The node before which to insert. If None this will insert after the beginning of the entire graph. 
Returns: A resource manager that will restore the insert point on ``.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\graph.py", + "ast_data": "FunctionDef name:inserting_after arg:self arg:n arguments arg arg If Compare Return return:yes Call Compare Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "hermvander", + "source_code": "def hermvander(x, deg):\n ideg = pu._as_int(deg, 'deg')\n if ideg < 0:\n raise ValueError('deg must be non-negative')\n x = np.array(x, copy=None, ndmin=1) + 0.0\n dims = (ideg + 1,) + x.shape\n dtyp = x.dtype\n v = np.empty(dims, dtype=dtyp)\n v[0] = x * 0 + 1\n if ideg > 0:\n x2 = x * 2\n v[1] = x2\n for i in range(2, ideg + 1):\n v[i] = v[i - 1] * x2 - v[i - 2] * (2 * (i - 1))\n return np.moveaxis(v, 0, -1)", + "docstring": "Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree and sample points . The pseudo-Vandermonde matrix is defined by .. math:: V[..., i] = H_i(x), where ``0 >> import numpy as np >>> from numpy.polynomial.hermite import hermvander >>> x = np.array([-1, 0, 1]) >>> hermvander(x, 3) array([[ 1., -2., 2., 4.], [ 1., 0., -2., -0.], [ 1., 2., 2., -4.]])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite.py", + "ast_data": "FunctionDef name:hermvander arg:x arg:deg arguments arg arg Assign Call If Compare Raise Call Assign Call Assign Assign Assign Call Assign If Compare Assign Assign For Call Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "Curve", + "source_code": "@_register_style(_style_list, name='-')\nclass Curve(_Curve):\n\n def __init__(self):\n super().__init__(head_length=0.2, head_width=0.1)", + "docstring": "A simple curve without any arrow head.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "ClassDef name:Curve FunctionDef name:__init__ arg:self arguments arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "DistributeConfig", + "source_code": "class DistributeConfig(collections.namedtuple('DistributeConfig', ['train_distribute', 'eval_distribute', 'remote_cluster'])):\n\n def __new__(cls, train_distribute=None, eval_distribute=None, remote_cluster=None):\n return super(DistributeConfig, cls).__new__(cls, train_distribute, eval_distribute, remote_cluster)", + "docstring": "A config tuple for distribution strategies. Attributes: train_distribute: a object for training. eval_distribute: an optional object for evaluation. remote_cluster: a dict, or object specifying the cluster configurations. 
If this is given, the method will be running as a standalone client which connects to the cluster for training.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_config.py", + "ast_data": "ClassDef name:DistributeConfig Call FunctionDef name:__new__ arg:cls arg:train_distribute arg:eval_distribute arg:remote_cluster arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "copy_origin", + "source_code": "def copy_origin(from_node, to_node):\n origin = anno.Basic.ORIGIN.of(from_node, default=None)\n if origin is None:\n return\n if not isinstance(to_node, (list, tuple)):\n to_node = (to_node,)\n for node in to_node:\n for n in gast.walk(node):\n anno.setanno(n, anno.Basic.ORIGIN, origin)", + "docstring": "Copies the origin info from a node to another, recursively.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py", + "ast_data": "FunctionDef name:copy_origin arg:from_node arg:to_node arguments arg arg Assign Call If Compare Return return:no If Call Assign For For Call Call" + }, + { + "library": "tensorflow", + "name": "angle", + "source_code": "@tf_export('math.angle', v1=['math.angle', 'angle'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('angle')\ndef angle(input, name=None):\n with ops.name_scope(name, 'Angle', [input]) as name:\n input = ops.convert_to_tensor(input, name='input')\n if input.dtype.is_complex:\n return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.where(input < 0, np.pi * array_ops.ones_like(input), array_ops.zeros_like(input))", + "docstring": "Returns the element-wise argument of a complex (or real) tensor. Given a tensor , this operation returns a tensor of type that is the argument of each element in considered as a complex number. The elements in are considered to be complex numbers of the form \\\\(a + bj\\\\), where *a* is the real part and *b* is the imaginary part. If is real then *b* is zero by definition. The argument returned by this function is of the form \\\\(atan2(b, a)\\\\). If is real, a tensor of all zeros is returned. For example: Args: input: A . Must be one of the following types: , , , . name: A name for the operation (optional). Returns: A of type or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:angle arg:input arg:name arguments arg arg With Call Assign Call If Return return:yes Call Return return:yes Call Compare Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "list_physical_devices", + "source_code": "@tf_export('config.list_physical_devices', 'config.experimental.list_physical_devices')\n@deprecation.deprecated_endpoints('config.experimental.list_physical_devices')\ndef list_physical_devices(device_type=None):\n return context.context().list_physical_devices(device_type)", + "docstring": "Return a list of physical devices visible to the host runtime. Physical devices are hardware devices present on the host machine. By default all discovered CPU and GPU devices are considered visible. This API allows querying the physical hardware resources prior to runtime initialization. Thus, giving an opportunity to call any additional configuration APIs. This is in contrast to , which triggers runtime initialization in order to list the configured devices. 
The following example lists the number of visible GPUs on the host. >>> physical_devices = tf.config.list_physical_devices('GPU') >>> print(\"Num GPUs:\", len(physical_devices)) Num GPUs: ... However, the number of GPUs available to the runtime may change during runtime initialization due to marking certain devices as not visible or configuring multiple logical devices. Args: device_type: (optional string) Only include devices matching this device type. For example \"CPU\" or \"GPU\". Notes: 1. If provided with any numerical values or any string other than supported device type such as 'CPU' it returns an empty list instead of raising error. 2. For default value it returns all physical devices Returns: List of discovered objects", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:list_physical_devices arg:device_type arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "pytorch", + "name": "pids", + "source_code": "@abc.abstractmethod\ndef pids(self) -> dict[int, int]:\n raise NotImplementedError", + "docstring": "Return pids of processes mapped by their respective local_ranks.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py", + "ast_data": "FunctionDef name:pids arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "_maybe_wrap_custom_getter", + "source_code": "def _maybe_wrap_custom_getter(custom_getter, old_getter):\n if old_getter is None:\n return custom_getter\n\n def wrapped_custom_getter(getter, *args, **kwargs):\n return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)\n return wrapped_custom_getter", + "docstring": "Wrap a call to a custom_getter to use the old_getter internally.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", + "ast_data": "FunctionDef name:_maybe_wrap_custom_getter arg:custom_getter arg:old_getter arguments arg arg If Compare Return return:yes FunctionDef name:wrapped_custom_getter arg:getter arguments arg arg arg Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "GraphID", + "source_code": "@dataclasses.dataclass(frozen=True)\nclass GraphID:\n id: int", + "docstring": "Unique counter of a cuda graph recording", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "ClassDef name:GraphID Call" + }, + { + "library": "scipy", + "name": "iirpeak", + "source_code": "def iirpeak(w0, Q, fs=2.0):\n return _design_notch_peak_filter(w0, Q, 'peak', fs)", + "docstring": "Design second-order IIR peak (resonant) digital filter. A peak filter is a band-pass filter with a narrow bandwidth (high quality factor). It rejects components outside a narrow frequency band. Parameters ---------- w0 : float Frequency to be retained in a signal. If is specified, this is in the same units as . 
By default, it is a normalized scalar that must satisfy ``0 >> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> fs = 1000.0 # Sample frequency (Hz) >>> f0 = 300.0 # Frequency to be retained (Hz) >>> Q = 30.0 # Quality factor >>> # Design peak filter >>> b, a = signal.iirpeak(f0, Q, fs) >>> # Frequency response >>> freq, h = signal.freqz(b, a, fs=fs) >>> # Plot >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue') >>> ax[0].set_title(\"Frequency Response\") >>> ax[0].set_ylabel(\"Amplitude [dB]\", color='blue') >>> ax[0].set_xlim([0, 500]) >>> ax[0].set_ylim([-50, 10]) >>> ax[0].grid(True) >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') >>> ax[1].set_ylabel(\"Phase [deg]\", color='green') >>> ax[1].set_xlabel(\"Frequency [Hz]\") >>> ax[1].set_xlim([0, 500]) >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) >>> ax[1].set_ylim([-90, 90]) >>> ax[1].grid(True) >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_filter_design.py", + "ast_data": "FunctionDef name:iirpeak arg:w0 arg:Q arg:fs arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "import_submodule", + "source_code": "def import_submodule(mod: types.ModuleType):\n for filename in sorted(os.listdir(os.path.dirname(cast(str, mod.__file__)))):\n if filename.endswith('.py') and filename[0] != '_':\n importlib.import_module(f'{mod.__name__}.{filename[:-3]}')", + "docstring": "Ensure all the files in a given submodule are imported", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:import_submodule arg:mod arguments arg For Call Call Call Call If BoolOp Call Compare Call" + }, + { + "library": "pytorch", + "name": "forbid_in_graph", + "source_code": "def forbid_in_graph(fn):\n if isinstance(fn, (list, tuple)):\n return [forbid_in_graph(x) for x in fn]\n assert callable(fn), 'forbid_in_graph applies only to callables'\n fn._dynamo_forbidden = True\n return fn", + "docstring": "Customize which functions TorchDynamo will assert are not present while tracing. If you want a graph break on this function instead, use disallow_in_graph. TODO(voz): We now have allow_in_graph, disallow_in_graph, forbid_in_graph - some more robust documentation would not be amiss.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\decorators.py", + "ast_data": "FunctionDef name:forbid_in_graph arg:fn arguments arg If Call Return return:yes Call Call Assign Return return:yes" + }, + { + "library": "numpy", + "name": "load_all_fcompiler_classes", + "source_code": "def load_all_fcompiler_classes():\n from glob import glob\n global fcompiler_class, fcompiler_aliases\n if fcompiler_class is not None:\n return\n pys = os.path.join(os.path.dirname(__file__), '*.py')\n fcompiler_class = {}\n fcompiler_aliases = {}\n for fname in glob(pys):\n module_name, ext = os.path.splitext(os.path.basename(fname))\n module_name = 'numpy.distutils.fcompiler.' 
+ module_name\n __import__(module_name)\n module = sys.modules[module_name]\n if hasattr(module, 'compilers'):\n for cname in module.compilers:\n klass = getattr(module, cname)\n desc = (klass.compiler_type, klass, klass.description)\n fcompiler_class[klass.compiler_type] = desc\n for alias in klass.compiler_aliases:\n if alias in fcompiler_aliases:\n raise ValueError('alias %r defined for both %s and %s' % (alias, klass.__name__, fcompiler_aliases[alias][1].__name__))\n fcompiler_aliases[alias] = desc", + "docstring": "Cache all the FCompiler classes found in modules in the numpy.distutils.fcompiler package.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py", + "ast_data": "FunctionDef name:load_all_fcompiler_classes arguments If Compare Return return:no Assign Call Call Assign Assign For Call Assign Call Call Assign Call Assign If Call For Assign Call Assign Assign For If Compare Raise Call Assign" + }, + { + "library": "tensorflow", + "name": "parents", + "source_code": "@property\ndef parents(self):\n return [self.categorical_column, self.weight_feature_key]", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "eye", + "source_code": "@tf_export('eye', 'linalg.eye')\n@dispatch.add_dispatch_support\ndef eye(num_rows, num_columns=None, batch_shape=None, dtype=dtypes.float32, name=None):\n return linalg_ops_impl.eye(num_rows, num_columns=num_columns, batch_shape=batch_shape, dtype=dtype, name=name)", + "docstring": "Construct an identity matrix, or a batch of matrices. See also , , , . Args: num_rows: Non-negative scalar giving the number of rows in each batch matrix. num_columns: Optional non-negative scalar giving the number of columns in each batch matrix. Defaults to . batch_shape: A list or tuple of Python integers or a 1-D . If provided, the returned will have leading batch dimensions of this shape. dtype: The type of an element in the resulting name: A name for this . Defaults to \"eye\". Returns: A of shape", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py", + "ast_data": "FunctionDef name:eye arg:num_rows arg:num_columns arg:batch_shape arg:dtype arg:name arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "sinhm", + "source_code": "@_apply_over_batch(('A', 2))\ndef sinhm(A):\n A = _asarray_square(A)\n return _maybe_real(A, 0.5 * (expm(A) - expm(-A)))", + "docstring": "Compute the hyperbolic matrix sine. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array. 
Returns ------- sinhm : (N, N) ndarray Hyperbolic matrix sine of Examples -------- >>> import numpy as np >>> from scipy.linalg import tanhm, sinhm, coshm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> s = sinhm(a) >>> s array([[ 10.57300653, 39.28826594], [ 13.09608865, 49.86127247]]) Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) >>> t = tanhm(a) >>> c = coshm(a) >>> t - s.dot(np.linalg.inv(c)) array([[ 2.72004641e-15, 4.55191440e-15], [ 0.00000000e+00, -5.55111512e-16]])", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_matfuncs.py", + "ast_data": "FunctionDef name:sinhm arg:A arguments arg Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "django", + "name": "DateDetailView", + "source_code": "class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):\n template_name_suffix = '_detail'", + "docstring": "Detail view of a single object on a single date; this differs from the standard DetailView by accepting a year/month/day in the URL.", + "type": "class", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "ClassDef name:DateDetailView Assign" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, gm: torch.fx.GraphModule, has_user_defined_triton_kernels: bool=False) -> None:\n self._stream = io.BytesIO()\n super().__init__(self._stream)\n self.dispatch_table = copyreg.dispatch_table.copy()\n self.dispatch_table.update({FakeTensor: functools.partial(self._reduce_fake_tensor), torch.Tensor: functools.partial(self._reduce_tensor), torch.nn.parameter.Parameter: functools.partial(self._reduce_tensor), torch.SymInt: functools.partial(self._reduce_symint), torch.fx.experimental._backward_state.BackwardState: functools.partial(self._reduce_unsupported)})\n if has_user_defined_triton_kernels:\n self.dispatch_table[gm.__class__] = functools.partial(self._reduce_graph_module)\n self.fast = True", + "docstring": "Create an FX graph pickler. If include_non_inlined=True, then pickling will include the _values_ for all Tensors. (Note that any tensors are constants attached as attributes to the GraphModule). Otherwise, pickling will include only the metadata for these tensors.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codecache.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:gm arg:has_user_defined_triton_kernels arguments arg arg arg Assign Call Call Call Assign Call Call Call Call Call Call Call If Assign Call Assign" + }, + { + "library": "matplotlib", + "name": "_update_props", + "source_code": "def _update_props(self, props, errfmt):\n ret = []\n with cbook._setattr_cm(self, eventson=False):\n for k, v in props.items():\n if k == 'axes':\n ret.append(setattr(self, k, v))\n else:\n func = getattr(self, f'set_{k}', None)\n if not callable(func):\n raise AttributeError(errfmt.format(cls=type(self), prop_name=k), name=k)\n ret.append(func(v))\n if ret:\n self.pchanged()\n self.stale = True\n return ret", + "docstring": "Helper for and . 
*errfmt* is used to generate error messages for invalid property names; it gets formatted with `` for \"{cls}\" and the property name for \"{prop_name}\".", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:_update_props arg:self arg:props arg:errfmt arguments arg arg arg Assign With Call For Call If Compare Call Call Assign Call If Call Raise Call Call Call Call Call If Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_offset_transform", + "source_code": "def get_offset_transform(self):\n if self._offset_transform is None:\n self._offset_transform = transforms.IdentityTransform()\n elif not isinstance(self._offset_transform, transforms.Transform) and hasattr(self._offset_transform, '_as_mpl_transform'):\n self._offset_transform = self._offset_transform._as_mpl_transform(self.axes)\n return self._offset_transform", + "docstring": "Return the instance used by this artist offset.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:get_offset_transform arg:self arguments arg If Compare Assign Call If BoolOp Call Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_parallel_predict_log_proba", + "source_code": "def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes, params):\n n_samples = X.shape[0]\n log_proba = np.empty((n_samples, n_classes))\n log_proba.fill(-np.inf)\n all_classes = np.arange(n_classes, dtype=int)\n for estimator, features in zip(estimators, estimators_features):\n log_proba_estimator = estimator.predict_log_proba(X[:, features], **params)\n if n_classes == len(estimator.classes_):\n log_proba = np.logaddexp(log_proba, log_proba_estimator)\n else:\n log_proba[:, estimator.classes_] = np.logaddexp(log_proba[:, estimator.classes_], log_proba_estimator[:, range(len(estimator.classes_))])\n missing = np.setdiff1d(all_classes, estimator.classes_)\n log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf)\n return log_proba", + "docstring": "Private function used to compute log probabilities within a job.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py", + "ast_data": "FunctionDef name:_parallel_predict_log_proba arg:estimators arg:estimators_features arg:X arg:n_classes arg:params arguments arg arg arg arg arg Assign Assign Call Call Assign Call For Call Assign Call If Compare Call Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "most_specific_common_supertype", + "source_code": "def most_specific_common_supertype(self, others: Sequence[trace.TraceType]) -> Optional['NamedTuple']:\n if not all((isinstance(other, NamedTuple) and self.type_name == other.type_name and (self.attribute_names == other.attribute_names) for other in others)):\n return None\n supertyped_attributes = self.attributes.most_specific_common_supertype([other.attributes for other in others])\n if supertyped_attributes is None:\n return None\n return NamedTuple(self.type_name, self.attribute_names, supertyped_attributes.components, self._placeholder_type)", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\default_types.py", + "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg If Call BoolOp Call Compare Compare Return return:no Assign Call If Compare Return 
return:no Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_StatelessRandomGammaV3Grad", + "source_code": "@ops.RegisterGradient('StatelessRandomGammaV3')\ndef _StatelessRandomGammaV3Grad(op: ops.Operation, grad):\n shape = op.inputs[0]\n alpha = op.inputs[4]\n sample = op.outputs[0]\n with ops.control_dependencies([grad]):\n return (None, None, None, None, _StatelessGammaGradAlpha(shape, alpha, sample, grad))", + "docstring": "Returns the gradient of a Gamma sample w.r.t. alpha. The gradient is computed using implicit differentiation (Figurnov et al., 2018). Args: op: A operation. We assume that the inputs to the operation are , , , , and tensors, and the output is the tensor. grad: The incoming gradient of the same shape as . Returns: A with derivatives . References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] ( ([pdf] (", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_grad.py", + "ast_data": "FunctionDef name:_StatelessRandomGammaV3Grad arg:op arg:grad arguments arg arg Assign Assign Assign With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, on_ui_exit=None, config=None):\n self._on_ui_exit = on_ui_exit\n self._command_handler_registry = debugger_cli_common.CommandHandlerRegistry()\n self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()\n self._tab_completion_registry.register_tab_comp_context([''], self.CLI_EXIT_COMMANDS + [debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] + debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)\n self._config = config or cli_config.CLIConfig()\n self._config_argparser = argparse.ArgumentParser(description='config command', usage=argparse.SUPPRESS)\n subparsers = self._config_argparser.add_subparsers()\n set_parser = subparsers.add_parser('set')\n set_parser.add_argument('property_name', type=str)\n set_parser.add_argument('property_value', type=str)\n set_parser = subparsers.add_parser('show')\n self.register_command_handler('config', self._config_command_handler, self._config_argparser.format_help(), prefix_aliases=['cfg'])", + "docstring": "Constructor of the base class. Args: on_ui_exit: () the callback to be called when the UI exits. 
config: An instance of carrying user-facing configurations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:on_ui_exit arg:config arguments arg arg arg Assign Assign Call Assign Call Call Assign BoolOp Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "batch_reduce", + "source_code": "def batch_reduce(self, reduce_op, value_destination_pairs, options=None):\n if options is None:\n options = collective_util.Options()\n if not _validate_value_destination_pairs(value_destination_pairs):\n value_destination_pairs = _normalize_value_destination_pairs(value_destination_pairs)\n for _, d in value_destination_pairs:\n validate_destinations(d)\n if self._num_between_graph_workers == 1 and _all_devices_match(value_destination_pairs, self._canonicalize_devices) and (len(value_destination_pairs[0][0].values) == 1):\n return [distribute_utils.regroup(v.values, wrap_class=value_lib.Mirrored) for v, _ in value_destination_pairs]\n if options is None:\n options = collective_util.Options()\n return self.batch_reduce_implementation(reduce_op, value_destination_pairs, options)", + "docstring": "Reduce values to destinations in batches. See . This can only be called in the cross-replica context. Args: reduce_op: a specifying how values should be combined. value_destination_pairs: a sequence of (value, destinations) pairs. See for descriptions. options: a . See for details. Returns: A list of or , one per pair in . Raises: ValueError: if is not an iterable of tuples of and destinations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", + "ast_data": "FunctionDef name:batch_reduce arg:self arg:reduce_op arg:value_destination_pairs arg:options arguments arg arg arg arg If Compare Assign Call If Call Assign Call For Call If BoolOp Compare Call Compare Call Return return:yes Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, filename, key_column_index=TextFileIndex.WHOLE_LINE, value_column_index=TextFileIndex.LINE_NUMBER, vocab_size=None, delimiter='\\t', name='text_file_id_table_init', key_dtype=dtypes.string):\n super(TextFileIdTableInitializer, self).__init__(filename, key_dtype, key_column_index, dtypes.int64, value_column_index, vocab_size=vocab_size, delimiter=delimiter, name=name)", + "docstring": "Constructs an initializer for an string-to-id table from a text file. It populates a table that its key and value types are string and int64, respectively. It generates one key-value pair per line. The content of the key and value are specified by the key_index and value_index. - TextFileIndex.LINE_NUMBER means use the line number starting from zero, expects data type int64. - TextFileIndex.WHOLE_LINE means use the whole line content, expects data type string. - A value >=0 means use the index (starting at zero) of the split line based on . Args: filename: The filename of the text file to be used for initialization. The path must be accessible from wherever the graph is initialized (eg. trainer or eval workers). The filename may be a scalar . key_column_index: The column index from the text file to get the values from. The default is to use the whole line content. value_column_index: The column index from the text file to get the values from. 
The default is to use the line number, starting from zero. vocab_size: The number of elements in the file, if known. delimiter: The delimiter to separate fields in a line. name: Optional name for the op. key_dtype: The data type. Raises: TypeError: when the filename is empty, or when the table key and value data types do not match the expected data types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:filename arg:key_column_index arg:value_column_index arg:vocab_size arg:delimiter arg:name arg:key_dtype arguments arg arg arg arg arg arg arg arg Call Call" + }, + { + "library": "scikit-learn", + "name": "HalfTweedieLoss", + "source_code": "class HalfTweedieLoss(BaseLoss):\n\n def __init__(self, sample_weight=None, power=1.5):\n super().__init__(closs=CyHalfTweedieLoss(power=float(power)), link=LogLink())\n if self.closs.power <= 0:\n self.interval_y_true = Interval(-np.inf, np.inf, False, False)\n elif self.closs.power < 2:\n self.interval_y_true = Interval(0, np.inf, True, False)\n else:\n self.interval_y_true = Interval(0, np.inf, False, False)\n\n def constant_to_optimal_zero(self, y_true, sample_weight=None):\n if self.closs.power == 0:\n return HalfSquaredError().constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight)\n elif self.closs.power == 1:\n return HalfPoissonLoss().constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight)\n elif self.closs.power == 2:\n return HalfGammaLoss().constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight)\n else:\n p = self.closs.power\n term = np.power(np.maximum(y_true, 0), 2 - p) / (1 - p) / (2 - p)\n if sample_weight is not None:\n term *= sample_weight\n return term", + "docstring": "Half Tweedie deviance loss with log-link, for regression. Domain: y_true in real numbers for power <= 0 y_true in non-negative real numbers for 0 < power < 2 y_true in positive real numbers for 2 <= power y_pred in positive real numbers power in real numbers Link: y_pred = exp(raw_prediction) For a given sample x_i, half Tweedie deviance loss with p=power is defined as:: loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p) - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p) + exp(raw_prediction_i)**(2-p) / (2-p) Taking the limits for p=0, 1, 2 gives HalfSquaredError with a log link, HalfPoissonLoss and HalfGammaLoss. We also skip constant terms, but those are different for p=0, 1, 2. Therefore, the loss is not continuous in . Note furthermore that although no Tweedie distribution exists for 0 < power < 1, it still gives a strictly consistent scoring function for the expectation.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "ClassDef name:HalfTweedieLoss FunctionDef name:__init__ arg:self arg:sample_weight arg:power arguments arg arg arg Call Call Call Call Call If Compare Assign Call If Compare Assign Call Assign Call FunctionDef name:constant_to_optimal_zero arg:self arg:y_true arg:sample_weight arguments arg arg arg If Compare Return return:yes Call Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Assign Assign Call Call If Compare Return return:yes" + }, + { + "library": "django", + "name": "choices", + "source_code": "def choices(self, changelist):\n raise NotImplementedError('subclasses of ListFilter must provide a choices() method')", + "docstring": "Return choices ready to be output in the template. 
is the ChangeList to be displayed.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\filters.py", + "ast_data": "FunctionDef name:choices arg:self arg:changelist arguments arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "_compute_dof", + "source_code": "def _compute_dof(self, kind, dz=None):\n if kind == 'user':\n if dz is None:\n raise ValueError(\"For a CubicTriInterpolator with *kind*='user', a valid *dz* argument is expected.\")\n TE = _DOF_estimator_user(self, dz=dz)\n elif kind == 'geom':\n TE = _DOF_estimator_geom(self)\n else:\n TE = _DOF_estimator_min_E(self)\n return TE.compute_dof_from_df()", + "docstring": "Compute and return nodal dofs according to kind. Parameters ---------- kind : {'min_E', 'geom', 'user'} Choice of the _DOF_estimator subclass to estimate the gradient. dz : tuple of array-likes (dzdx, dzdy), optional Used only if *kind*=user; in this case passed to the :class:. Returns ------- array-like, shape (npts, 2) Estimation of the gradient at triangulation nodes (stored as degree of freedoms of reduced-HCT triangle elements).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:_compute_dof arg:self arg:kind arg:dz arguments arg arg arg If Compare If Compare Raise Call Assign Call If Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_ones_like", + "source_code": "def _ones_like(x):\n if x.get_shape().is_fully_defined():\n return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype)\n return array_ops.ones_like(x)", + "docstring": "Convenience function attempts to statically construct .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:_ones_like arg:x arguments arg If Call Call Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "normalize_path_patterns", + "source_code": "def normalize_path_patterns(patterns):\n patterns = [os.path.normcase(p) for p in patterns]\n dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}}\n norm_patterns = []\n for pattern in patterns:\n for dir_suffix in dir_suffixes:\n if pattern.endswith(dir_suffix):\n norm_patterns.append(pattern.removesuffix(dir_suffix))\n break\n else:\n norm_patterns.append(pattern)\n return norm_patterns", + "docstring": "Normalize an iterable of glob style patterns based on OS.", + "type": "function", + "file_path": "django\\django\\core\\management\\utils.py", + "ast_data": "FunctionDef name:normalize_path_patterns arg:patterns arguments arg Assign Call Assign Assign For For If Call Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "triangulate_points", + "source_code": "def triangulate_points(P1: torch.Tensor, P2: torch.Tensor, points1: torch.Tensor, points2: torch.Tensor) -> torch.Tensor:\n KORNIA_CHECK_SHAPE(P1, ['*', '3', '4'])\n KORNIA_CHECK_SHAPE(P2, ['*', '3', '4'])\n KORNIA_CHECK_SHAPE(points1, ['*', 'N', '2'])\n KORNIA_CHECK_SHAPE(points2, ['*', 'N', '2'])\n points_shape = max(points1.shape, points2.shape)\n X = zeros(points_shape[:-1] + (4, 4)).type_as(points1)\n for i in range(4):\n X[..., 0, i] = points1[..., 0] * P1[..., 2:3, i] - P1[..., 0:1, i]\n X[..., 1, i] = points1[..., 1] * P1[..., 2:3, i] - P1[..., 1:2, i]\n X[..., 2, i] = points2[..., 0] * P2[..., 2:3, i] - P2[..., 0:1, i]\n X[..., 3, i] = points2[..., 1] * P2[..., 2:3, i] - P2[..., 1:2, i]\n _, 
_, V = _torch_svd_cast(X)\n points3d_h = V[..., -1]\n points3d: torch.Tensor = convert_points_from_homogeneous(points3d_h)\n return points3d", + "docstring": "Reconstructs a bunch of points by triangulation. Triangulates the 3d position of 2d correspondences between several images. Reference: Internally it uses DLT method from Hartley/Zisserman 12.2 pag.312 The input points are assumed to be in homogeneous coordinate system and being inliers correspondences. The method does not perform any robust estimation. Args: P1: The projection matrix for the first camera with shape :math:. P2: The projection matrix for the second camera with shape :math:. points1: The set of points seen from the first camera frame in the camera plane coordinates with shape :math:. points2: The set of points seen from the second camera frame in the camera plane coordinates with shape :math:. Returns: The reconstructed 3d points in the world frame with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\triangulation.py", + "ast_data": "FunctionDef name:triangulate_points arg:P1 arg:P2 arg:points1 arg:points2 arguments arg arg arg arg Call Call Call Call Assign Call Assign Call Call For Call Assign Assign Assign Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_cuda", + "source_code": "@property\ndef is_cuda(self) -> bool:\n return self.data.is_cuda", + "docstring": "Return true if stored on a gpu.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\rnn.py", + "ast_data": "FunctionDef name:is_cuda arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "decorator_from_middleware_with_args", + "source_code": "def decorator_from_middleware_with_args(middleware_class):\n return make_middleware_decorator(middleware_class)", + "docstring": "Like decorator_from_middleware, but return a function that accepts the arguments to be passed to the middleware_class. Use like:: cache_page = decorator_from_middleware_with_args(CacheMiddleware) # ... @cache_page(3600) def my_view(request): # ...", + "type": "function", + "file_path": "django\\django\\utils\\decorators.py", + "ast_data": "FunctionDef name:decorator_from_middleware_with_args arg:middleware_class arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "broadcasting_binary_op_wrapper", + "source_code": "def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None):\n broadcast_dims = broadcast_dims or []\n broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64)\n x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims)\n return fn(x, y, name=name)", + "docstring": "Inner wrapper function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py", + "ast_data": "FunctionDef name:broadcasting_binary_op_wrapper arg:x arg:y arg:broadcast_dims arg:name arguments arg arg arg arg Assign BoolOp Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_save_and_write_assets", + "source_code": "def _save_and_write_assets(self, assets_collection_to_add=None):\n asset_filename_map = _maybe_save_assets(_add_asset_to_collection, assets_collection_to_add)\n if not asset_filename_map:\n tf_logging.info('No assets to write.')\n return\n copy_assets_to_destination_dir(asset_filename_map, self._export_dir, self._saved_asset_files)", + "docstring": "Saves asset to the meta graph and writes asset files to disk. 
Args: assets_collection_to_add: The collection where the asset paths are setup.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py", + "ast_data": "FunctionDef name:_save_and_write_assets arg:self arg:assets_collection_to_add arguments arg arg Assign Call If Call Return return:no Call" + }, + { + "library": "pandas", + "name": "_dtype_to_default_stata_fmt", + "source_code": "def _dtype_to_default_stata_fmt(dtype: np.dtype, column: Series, dta_version: int=114, force_strl: bool=False) -> str:\n if dta_version < 117:\n max_str_len = 244\n else:\n max_str_len = 2045\n if force_strl:\n return '%9s'\n if dtype.type is np.object_:\n itemsize = max_len_string_array(ensure_object(column._values))\n if itemsize > max_str_len:\n if dta_version >= 117:\n return '%9s'\n else:\n raise ValueError(excessive_string_length_error.format(column.name))\n return '%' + str(max(itemsize, 1)) + 's'\n elif dtype == np.float64:\n return '%10.0g'\n elif dtype == np.float32:\n return '%9.0g'\n elif dtype == np.int32:\n return '%12.0g'\n elif dtype in (np.int8, np.int16):\n return '%8.0g'\n else:\n raise NotImplementedError(f'Data type {dtype} not supported.')", + "docstring": "Map numpy dtype to stata's default format for this type. Not terribly important since users can change this in Stata. Semantics are object -> \"%DDs\" where DD is the length of the string. If not a string, raise ValueError float64 -> \"%10.0g\" float32 -> \"%9.0g\" int64 -> \"%9.0g\" int32 -> \"%12.0g\" int16 -> \"%8.0g\" int8 -> \"%8.0g\" strl -> \"%9s\"", + "type": "function", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:_dtype_to_default_stata_fmt arg:dtype arg:column arg:dta_version arg:force_strl arguments arg arg arg arg If Compare Assign Assign If Return return:yes If Compare Assign Call Call If Compare If Compare Return return:yes Raise Call Call Return return:yes Call Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "scipy", + "name": "_pearsonr_fisher_ci", + "source_code": "def _pearsonr_fisher_ci(r, n, confidence_level, alternative):\n xp = array_namespace(r)\n ones = xp.ones_like(r)\n n = xp.asarray(n, dtype=r.dtype)\n confidence_level = xp.asarray(confidence_level, dtype=r.dtype)\n with np.errstate(divide='ignore', invalid='ignore'):\n zr = xp.atanh(r)\n se = xp.sqrt(1 / (n - 3))\n if alternative == 'two-sided':\n h = special.ndtri(0.5 + confidence_level / 2)\n zlo = zr - h * se\n zhi = zr + h * se\n rlo = xp.tanh(zlo)\n rhi = xp.tanh(zhi)\n elif alternative == 'less':\n h = special.ndtri(confidence_level)\n zhi = zr + h * se\n rhi = xp.tanh(zhi)\n rlo = -ones\n else:\n h = special.ndtri(confidence_level)\n zlo = zr - h * se\n rlo = xp.tanh(zlo)\n rhi = ones\n mask = n <= 3\n rlo = xpx.at(rlo)[mask].set(-1)\n rhi = xpx.at(rhi)[mask].set(1)\n rlo = rlo[()] if rlo.ndim == 0 else rlo\n rhi = rhi[()] if rhi.ndim == 0 else rhi\n return ConfidenceInterval(low=rlo, high=rhi)", + "docstring": "Compute the confidence interval for Pearson's R. 
Fisher's transformation is used to compute the confidence interval (", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:_pearsonr_fisher_ci arg:r arg:n arg:confidence_level arg:alternative arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call With Call Assign Call Assign Call If Compare Assign Call Assign Assign Assign Call Assign Call If Compare Assign Call Assign Assign Call Assign Assign Call Assign Assign Call Assign Assign Compare Assign Call Call Assign Call Call Assign Compare Assign Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_write_keras_model_summary", + "source_code": "def _write_keras_model_summary(self):\n with self._train_writer.as_default():\n with summary_ops_v2.record_if(True):\n summary_writable = self.model._is_graph_network or self.model.__class__.__name__ == 'Sequential'\n if summary_writable:\n keras_model_summary('keras', self.model, step=0)", + "docstring": "Writes Keras graph network summary to TensorBoard.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:_write_keras_model_summary arg:self arguments arg With Call With Call Assign BoolOp Compare If Call" + }, + { + "library": "tensorflow", + "name": "_create_uninitialized_mirrored_tpu_variables", + "source_code": "def _create_uninitialized_mirrored_tpu_variables(**kwargs):\n if kwargs.get('initial_value', None) is None:\n return _create_mirrored_tpu_variables(**kwargs)\n value_list = []\n initial_value = None\n for i, d in enumerate(devices):\n with ops.device(d):\n if i == 0:\n initial_value = kwargs.get('initial_value', None)\n with maybe_init_scope():\n if initial_value is not None:\n if callable(initial_value):\n initial_value = initial_value()\n initial_value = ops.convert_to_tensor(initial_value, dtype=kwargs.get('dtype', None))\n if i > 0:\n var0name = value_list[0].name.split(':')[0]\n kwargs['name'] = '%s/replica_%d/' % (var0name, i)\n kwargs['initial_value'] = initial_value\n if kwargs.get('dtype', None) is None:\n kwargs['dtype'] = kwargs['initial_value'].dtype\n if kwargs.get('shape', None) is None:\n kwargs['shape'] = kwargs['initial_value'].shape\n with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n v = uninitialized_variable_creator(**kwargs)\n assert not isinstance(v, tpu_values.TPUMirroredVariable)\n value_list.append(v)\n return value_list", + "docstring": "Returns a list of s. The list contains s and can be used to initialize a . Args: **kwargs: the keyword arguments for creating a variable", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:_create_uninitialized_mirrored_tpu_variables arguments arg If Compare Call Return return:yes Call Assign Assign For Call With Call If Compare Assign Call With Call If Compare If Call Assign Call Assign Call Call If Compare Assign Call Assign Assign If Compare Call Assign If Compare Call Assign With Call Assign Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, *system, **kwargs):\n dt = kwargs.pop('dt', True)\n super().__init__(*system, **kwargs)\n self.dt = dt", + "docstring": "Initialize the baseclass. 
The heavy lifting is done by the subclasses.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call Call Assign" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, base=1, month=1, day=1, tz=None):\n rule = rrulewrapper(YEARLY, interval=base, bymonth=month, bymonthday=day, **self.hms0d)\n super().__init__(rule, tz=tz)\n self.base = ticker._Edge_integer(base, 0)", + "docstring": "Parameters ---------- base : int, default: 1 Mark ticks every *base* years. month : int, default: 1 The month on which to place the ticks, starting from 1. Default is January. day : int, default: 1 The day on which to place the ticks. tz : str or , default: :rc: Ticks timezone. If a string, *tz* is passed to .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\dates.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:base arg:month arg:day arg:tz arguments arg arg arg arg arg Assign Call Call Call Assign Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y, **fit_params):\n if _routing_enabled():\n routed_params = process_routing(self, 'fit', **fit_params)\n else:\n routed_params = Bunch(estimator=Bunch(fit=fit_params))\n return self._fit(X, y, **routed_params.estimator.fit)", + "docstring": "Fit the RFE model and then the underlying estimator on the selected features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values. **fit_params : dict - If (default): Parameters directly passed to the `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- self : object Fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg If Call Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "to_period", + "source_code": "def to_period(self, freq=None) -> PeriodArray:\n from pandas.core.arrays import PeriodArray\n if self.tz is not None:\n warnings.warn('Converting to PeriodArray/Index representation will drop timezone information.', UserWarning, stacklevel=find_stack_level())\n if freq is None:\n freq = self.freqstr or self.inferred_freq\n if isinstance(self.freq, BaseOffset) and hasattr(self.freq, '_period_dtype_code'):\n freq = PeriodDtype(self.freq)._freqstr\n if freq is None:\n raise ValueError('You must pass a freq argument as current index has none.')\n res = get_period_alias(freq)\n if res is None:\n res = freq\n freq = res\n return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)", + "docstring": "Cast to PeriodArray/PeriodIndex at a particular frequency. Converts DatetimeArray/Index to PeriodArray/PeriodIndex. Parameters ---------- freq : str or Period, optional One of pandas' :ref: or an Period object. Will be inferred by default. Returns ------- PeriodArray/PeriodIndex Immutable ndarray holding ordinal values at a particular frequency. Raises ------ ValueError When converting a DatetimeArray/Index with non-regular values, so that a frequency cannot be inferred. See Also -------- PeriodIndex: Immutable ndarray holding ordinal values. DatetimeIndex.to_pydatetime: Return DatetimeIndex as object. 
Examples -------- >>> df = pd.DataFrame( ... {\"y\": [1, 2, 3]}, ... index=pd.to_datetime( ... [ ... \"2000-03-31 00:00:00\", ... \"2000-05-31 00:00:00\", ... \"2000-08-31 00:00:00\", ... ] ... ), ... ) >>> df.index.to_period(\"M\") PeriodIndex(['2000-03', '2000-05', '2000-08'], dtype='period[M]') Infer the daily frequency >>> idx = pd.date_range(\"2017-01-01\", periods=2) >>> idx.to_period() PeriodIndex(['2017-01-01', '2017-01-02'], dtype='period[D]')", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py", + "ast_data": "FunctionDef name:to_period arg:self arg:freq arguments arg arg If Compare Call Call If Compare Assign BoolOp If BoolOp Call Call Assign Call If Compare Raise Call Assign Call If Compare Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_combiner", + "source_code": "def get_combiner(self):\n raise NotImplementedError('not implemented')", + "docstring": "Returns the embedding combiner.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py", + "ast_data": "FunctionDef name:get_combiner arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_run_benchmarks", + "source_code": "def _run_benchmarks(regex):\n registry = list(GLOBAL_BENCHMARK_REGISTRY)\n selected_benchmarks = []\n for benchmark in registry:\n benchmark_name = '%s.%s' % (benchmark.__module__, benchmark.__name__)\n attrs = dir(benchmark)\n benchmark_instance = None\n for attr in attrs:\n if not attr.startswith('benchmark'):\n continue\n candidate_benchmark_fn = getattr(benchmark, attr)\n if not callable(candidate_benchmark_fn):\n continue\n full_benchmark_name = '%s.%s' % (benchmark_name, attr)\n if regex == 'all' or re.search(regex, full_benchmark_name):\n selected_benchmarks.append(full_benchmark_name)\n benchmark_instance = benchmark_instance or benchmark()\n instance_benchmark_fn = getattr(benchmark_instance, attr)\n instance_benchmark_fn()\n if not selected_benchmarks:\n raise ValueError(\"No benchmarks matched the pattern: '{}'\".format(regex))", + "docstring": "Run benchmarks that match regex . This function goes through the global benchmark registry, and matches benchmark class and method names of the form to the given regex. If a method matches, it is run. Args: regex: The string regular expression to match Benchmark classes against. Raises: ValueError: If no benchmarks were selected by the input regex.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py", + "ast_data": "FunctionDef name:_run_benchmarks arg:regex arguments arg Assign Call Assign For Assign Assign Call Assign For If Call Assign Call If Call Assign If BoolOp Compare Call Call Assign BoolOp Call Assign Call Call If Raise Call Call" + }, + { + "library": "pytorch", + "name": "shutdown", + "source_code": "def shutdown(self) -> None:\n nodes = []\n for roots in self.roots.values():\n nodes.extend(roots)\n while nodes:\n node = nodes.pop()\n for children in node.children.values():\n nodes.extend(children)\n node.remove_node_cached_tensors()\n node.graph = None\n self.graph = None\n self.roots = None\n self.current_node = None", + "docstring": "Remove all cached tensors in all nodes. 
Because cached tensors can hold gradients which in turn might reference a backward which invokes a CUDA Graph Node, we have to manually clear them on shutdown to avoid a reference cycle.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:shutdown arg:self arguments arg Assign For Call Call While Assign Call For Call Call Call Assign Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "_validate_estimators", + "source_code": "def _validate_estimators(self):\n if len(self.estimators) == 0:\n raise ValueError(\"Invalid 'estimators' attribute, 'estimators' should be a non-empty list of (string, estimator) tuples.\")\n names, estimators = zip(*self.estimators)\n self._validate_names(names)\n has_estimator = any((est != 'drop' for est in estimators))\n if not has_estimator:\n raise ValueError('All estimators are dropped. At least one is required to be an estimator.')\n return (names, estimators)", + "docstring": "Overload the method of to be more lenient towards the type of . Regressors can be accepted for some cases such as ordinal regression.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py", + "ast_data": "FunctionDef name:_validate_estimators arg:self arguments arg If Compare Call Raise Call Assign Call Call Assign Call Compare If Raise Call Return return:yes" + }, + { + "library": "django", + "name": "_check_min_num", + "source_code": "def _check_min_num(self, obj):\n if obj.min_num is None:\n return []\n elif not isinstance(obj.min_num, int):\n return must_be('an integer', option='min_num', obj=obj, id='admin.E205')\n else:\n return []", + "docstring": "Check that min_num is an integer.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_min_num arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call Return return:no" + }, + { + "library": "cryptography", + "name": "add_extension", + "source_code": "def add_extension(self, extval: ExtensionType, critical: bool) -> CertificateRevocationListBuilder:\n if not isinstance(extval, ExtensionType):\n raise TypeError('extension must be an ExtensionType')\n extension = Extension(extval.oid, critical, extval)\n _reject_duplicate_extension(extension, self._extensions)\n return CertificateRevocationListBuilder(self._issuer_name, self._last_update, self._next_update, [*self._extensions, extension], self._revoked_certificates)", + "docstring": "Adds an X.509 extension to the certificate revocation list.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\x509\\base.py", + "ast_data": "FunctionDef name:add_extension arg:self arg:extval arg:critical arguments arg arg arg If Call Raise Call Assign Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_handler", + "source_code": "def get_handler(self, *args, **options):\n handler = super().get_handler(*args, **options)\n use_static_handler = options['use_static_handler']\n insecure_serving = options['insecure_serving']\n if use_static_handler and (settings.DEBUG or insecure_serving):\n return StaticFilesHandler(handler)\n return handler", + "docstring": "Return the static files serving handler wrapping the default handler, if static files should be served. 
Otherwise return the default handler.", + "type": "method", + "file_path": "django\\django\\contrib\\staticfiles\\management\\commands\\runserver.py", + "ast_data": "FunctionDef name:get_handler arg:self arguments arg arg arg Assign Call Call Assign Assign If BoolOp BoolOp Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_flat_tensor_shapes", + "source_code": "def get_flat_tensor_shapes(element_spec):\n return [spec.shape for spec in get_flat_tensor_specs(element_spec)]", + "docstring": "Returns a list s for the element tensor representation. Args: element_spec: A nested structure of objects representing to element type specification. Returns: A list s for the element tensor representation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py", + "ast_data": "FunctionDef name:get_flat_tensor_shapes arg:element_spec arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "should_skip_lowering", + "source_code": "def should_skip_lowering(op: torch.fx.node.Node, qconfig_map: dict[str, QConfigAny]):\n return op.name in qconfig_map and qconfig_map[op.name] is None", + "docstring": "Return True if the op is configured with a None qconfig, False otherwise. Note: maybe need to generalize this to also check for the dtype, and we only lower when dtype matches, but right now fbgemm/qnnpack only support a single dtype, so it is OK for now.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_lower_to_native_backend.py", + "ast_data": "FunctionDef name:should_skip_lowering arg:op arg:qconfig_map arguments arg arg Return return:yes BoolOp Compare Compare" + }, + { + "library": "sphinx", + "name": "CitationReferenceTransform", + "source_code": "class CitationReferenceTransform(SphinxPostTransform):\n default_priority = 5\n formats = ('latex',)\n\n def run(self, **kwargs: Any) -> None:\n domain = self.env.domains.citation_domain\n matcher = NodeMatcher(addnodes.pending_xref, refdomain='citation', reftype='ref')\n for node in matcher.findall(self.document):\n docname, labelid, _ = domain.citations.get(node['reftarget'], ('', '', 0))\n if docname:\n citation_ref = nodes.citation_reference('', '', *node.children, docname=docname, refname=labelid)\n node.replace_self(citation_ref)", + "docstring": "Replace pending_xref nodes for citation by citation_reference. 
To handle citation reference easily on LaTeX writer, this converts pending_xref nodes to citation_reference.", + "type": "class", + "file_path": "sphinx\\sphinx\\builders\\latex\\transforms.py", + "ast_data": "ClassDef name:CitationReferenceTransform Assign Assign FunctionDef name:run arg:self arguments arg arg Assign Assign Call For Call Assign Call If Assign Call Call" + }, + { + "library": "scipy", + "name": "_ldl_get_d_and_l", + "source_code": "def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True):\n is_c = iscomplexobj(ldu)\n d = diag(diag(ldu))\n n = d.shape[0]\n blk_i = 0\n x, y = (1, 0) if lower else (0, 1)\n lu = tril(ldu, -1) if lower else triu(ldu, 1)\n diag_inds = arange(n)\n lu[diag_inds, diag_inds] = 1\n for blk in pivs[pivs != 0]:\n inc = blk_i + blk\n if blk == 2:\n d[blk_i + x, blk_i + y] = ldu[blk_i + x, blk_i + y]\n if is_c and hermitian:\n d[blk_i + y, blk_i + x] = ldu[blk_i + x, blk_i + y].conj()\n else:\n d[blk_i + y, blk_i + x] = ldu[blk_i + x, blk_i + y]\n lu[blk_i + x, blk_i + y] = 0.0\n blk_i = inc\n return (d, lu)", + "docstring": "Helper function to extract the diagonal and triangular matrices for LDL.T factorization. Parameters ---------- ldu : ndarray The compact output returned by the LAPACK routing pivs : ndarray The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For every 2 there is a succeeding 0. lower : bool, optional If set to False, upper triangular part is considered. hermitian : bool, optional If set to False a symmetric complex array is assumed. Returns ------- d : ndarray The block diagonal matrix. lu : ndarray The upper/lower triangular matrix", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_decomp_ldl.py", + "ast_data": "FunctionDef name:_ldl_get_d_and_l arg:ldu arg:pivs arg:lower arg:hermitian arguments arg arg arg arg Assign Call Assign Call Call Assign Assign Assign Assign Call Call Assign Call Assign For Compare Assign If Compare Assign If BoolOp Assign Call Assign Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "rttopo_version", + "source_code": "def rttopo_version(self):\n return self._get_spatialite_func('rttopo_version()')", + "docstring": "Return the version of RTTOPO library used by SpatiaLite.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py", + "ast_data": "FunctionDef name:rttopo_version arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_check_flatten_did_not_remove", + "source_code": "def _check_flatten_did_not_remove(original, jit_flattened):\n\n def flatten(x):\n if isinstance(x, (list, tuple)):\n for inner in x:\n yield from flatten(inner)\n elif isinstance(x, dict):\n for inner in x.values():\n yield from flatten(inner)\n else:\n yield x\n flattened_with_none = list(flatten(original))\n num_none = len(flattened_with_none) - len(jit_flattened)\n assert num_none >= 0\n if num_none:\n raise ValueError(f\"args contained {num_none} None's after flattening. When exporting a ScriptModule or ScriptFunction, no args may be None because that breaks type propagation.\")", + "docstring": "torch.jit._flatten removes None. 
Check if it did so in this case.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\utils.py", + "ast_data": "FunctionDef name:_check_flatten_did_not_remove arg:original arg:jit_flattened arguments arg arg FunctionDef name:flatten arg:x arguments arg If Call For Call If Call For Call Call Assign Call Call Assign Call Call Compare If Raise Call" + }, + { + "library": "django", + "name": "action_checkbox", + "source_code": "def action_checkbox(self, obj):\n attrs = {'class': 'action-select', 'aria-label': format_html(_('Select this object for an action - {}'), str(obj))}\n checkbox = forms.CheckboxInput(attrs, lambda value: False)\n return checkbox.render(helpers.ACTION_CHECKBOX_NAME, str(obj.pk))", + "docstring": "A list_display column containing a checkbox widget.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:action_checkbox arg:self arg:obj arguments arg arg Assign Call Call Call Assign Call arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_is_trivial", + "source_code": "def _is_trivial(node):\n trivial_node_types = (gast.Name, bool, str, gast.Add, gast.Sub, gast.Mult, gast.Div, gast.Mod, gast.Pow, gast.LShift, gast.RShift, gast.BitOr, gast.BitXor, gast.BitAnd, gast.FloorDiv, gast.Invert, gast.Not, gast.UAdd, gast.USub, gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt, gast.GtE, gast.Is, gast.IsNot, gast.In, gast.NotIn, gast.expr_context)\n if isinstance(node, trivial_node_types) and (not _is_py2_name_constant(node)):\n return True\n if gast_util.is_ellipsis(node):\n return True\n return False", + "docstring": "Returns whether to consider the given node 'trivial'. The definition of 'trivial' is a node that can't meaningfully be pulled out into its own assignment statement. This is surprisingly difficult to do robustly across versions of Python and gast, as the parsing of constants has changed, if I may, constantly. 
Args: node: An AST node to check for triviality Returns: trivial: A Python indicating whether the node is trivial.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\common_transformers\\anf.py", + "ast_data": "FunctionDef name:_is_trivial arg:node arguments arg Assign If BoolOp Call Call Return return:yes If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "PrecisionConfig", + "source_code": "class PrecisionConfig:\n __slots__ = ('operand_precision',)\n Precision = ops.PrecisionConfig_Precision\n\n def __init__(self):\n self.operand_precision = []", + "docstring": "Python representation of a xla.PrecisionConfig protobuf.", + "type": "class", + "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py", + "ast_data": "ClassDef name:PrecisionConfig Assign Assign FunctionDef name:__init__ arg:self arguments arg Assign" + }, + { + "library": "tensorflow", + "name": "convert_legacy_structure", + "source_code": "def convert_legacy_structure(output_types, output_shapes, output_classes):\n flat_types = nest.flatten(output_types)\n flat_shapes = nest.flatten(output_shapes)\n flat_classes = nest.flatten(output_classes)\n flat_ret = []\n for flat_type, flat_shape, flat_class in zip(flat_types, flat_shapes, flat_classes):\n if isinstance(flat_class, type_spec.TypeSpec):\n flat_ret.append(flat_class)\n elif issubclass(flat_class, sparse_tensor.SparseTensor):\n flat_ret.append(sparse_tensor.SparseTensorSpec(flat_shape, flat_type))\n elif issubclass(flat_class, tensor_lib.Tensor):\n flat_ret.append(tensor_lib.TensorSpec(flat_shape, flat_type))\n elif issubclass(flat_class, tensor_array_ops.TensorArray):\n flat_ret.append(tensor_array_ops.TensorArraySpec(flat_shape[2:], flat_type, dynamic_size=tensor_shape.dimension_value(flat_shape[0]), infer_shape=tensor_shape.dimension_value(flat_shape[1])))\n else:\n raise TypeError('Could not build a structure for output class {}. Make sure any component class in `output_classes` inherits from one of the following classes: `tf.TypeSpec`, `tf.sparse.SparseTensor`, `tf.Tensor`, `tf.TensorArray`.'.format(flat_class.__name__))\n return nest.pack_sequence_as(output_classes, flat_ret)", + "docstring": "Returns a that represents the given legacy structure. This method provides a way to convert from the existing and structure-related properties to a object. A \"legacy\" structure is represented by the , , and properties. TODO(b/110122868): Remove this function once is used throughout . Args: output_types: A nested structure of objects corresponding to each component of a structured value. output_shapes: A nested structure of objects corresponding to each component a structured value. output_classes: A nested structure of Python objects corresponding to each component of a structured value. Returns: A . 
Raises: TypeError: If a structure cannot be built from the arguments, because one of the component classes in is not supported.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py", + "ast_data": "FunctionDef name:convert_legacy_structure arg:output_types arg:output_shapes arg:output_classes arguments arg arg arg Assign Call Assign Call Assign Call Assign For Call If Call Call If Call Call Call If Call Call Call If Call Call Call Call Call Raise Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_texts", + "source_code": "def get_texts(self):\n return silent_list('Text', self.texts)", + "docstring": "Return the list of \\s in the legend.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\legend.py", + "ast_data": "FunctionDef name:get_texts arg:self arguments arg Return return:yes Call" + }, + { + "library": "pygame", + "name": "collide_rect_ratio", + "source_code": "class collide_rect_ratio:\n\n def __init__(self, ratio):\n self.ratio = ratio\n\n def __repr__(self):\n return '<{klass} @{id:x} {attrs}>'.format(klass=self.__class__.__name__, id=id(self) & 16777215, attrs=' '.join((f'{k}={v!r}' for k, v in self.__dict__.items())))\n\n def __call__(self, left, right):\n ratio = self.ratio\n leftrect = left.rect\n width = leftrect.width\n height = leftrect.height\n leftrect = leftrect.inflate(width * ratio - width, height * ratio - height)\n rightrect = right.rect\n width = rightrect.width\n height = rightrect.height\n rightrect = rightrect.inflate(width * ratio - width, height * ratio - height)\n return leftrect.colliderect(rightrect)", + "docstring": "A callable class that checks for collisions using scaled rects The class checks for collisions between two sprites using a scaled version of the sprites' rects. Is created with a ratio; the instance is then intended to be passed as a collided callback function to the *collide functions. New in pygame 1.8.1", + "type": "class", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "ClassDef name:collide_rect_ratio FunctionDef name:__init__ arg:self arg:ratio arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call FunctionDef name:__call__ arg:self arg:left arg:right arguments arg arg arg Assign Assign Assign Assign Assign Call Assign Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_unsorted_segment_N", + "source_code": "def _unsorted_segment_N(data, segment_ids, num_segments):\n num_segments = ops.convert_to_tensor(num_segments)\n segment_ids_shape = array_ops.shape_internal(segment_ids)\n ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)\n n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)\n broadcastable_shape = array_ops.concat([num_segments[array_ops.newaxis], array_ops.ones([array_ops.rank(data) - array_ops.rank(segment_ids)], dtype=num_segments.dtype)], axis=0)\n n = array_ops.reshape(n, broadcastable_shape)\n return gen_math_ops.maximum(n, 1)", + "docstring": "Helper function for unsorted_segment_mean/_sqrtN. Computes the number of segment entries with 0-entries set to 1 to allow division by N. Args: data: A with data that will be assembled in the output. segment_ids: An integer tensor whose shape is a prefix of . The values must be in the range . The values are always validated to be in range on CPU, never validated on TPU/GPU. num_segments: An integer scalar . The number of distinct segment IDs. 
Returns: A with the number of segment entries with 0-entries set to 1.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:_unsorted_segment_N arg:data arg:segment_ids arg:num_segments arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "filter2d_separable", + "source_code": "def filter2d_separable(input: Tensor, kernel_x: Tensor, kernel_y: Tensor, border_type: str='reflect', normalized: bool=False, padding: str='same') -> Tensor:\n out_x = filter2d(input, kernel_x[..., None, :], border_type, normalized, padding)\n out = filter2d(out_x, kernel_y[..., None], border_type, normalized, padding)\n return out", + "docstring": "Convolve a tensor with two 1d kernels, in x and y directions. The function applies a given kernel to a tensor. The kernel is applied independently at each depth channel of the tensor. Before applying the kernel, the function applies padding according to the specified mode so that the output remains in the same shape. Args: input: the input tensor with shape of :math:. kernel_x: the kernel to be convolved with the input tensor. The kernel shape must be :math: or :math:. kernel_y: the kernel to be convolved with the input tensor. The kernel shape must be :math: or :math:. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)`. Example: >>> input = torch.tensor([[[ ... [0., 0., 0., 0., 0.], ... [0., 0., 0., 0., 0.], ... [0., 0., 5., 0., 0.], ... [0., 0., 0., 0., 0.], ... [0., 0., 0., 0., 0.],]]]) >>> kernel = torch.ones(1, 3) >>> filter2d_separable(input, kernel, kernel, padding='same') tensor([[[[0., 0., 0., 0., 0.], [0., 5., 5., 5., 0.], [0., 5., 5., 5., 0.], [0., 5., 5., 5., 0.], [0., 0., 0., 0., 0.]]]])", + "type": "function", + "file_path": "kornia\\kornia\\filters\\filter.py", + "ast_data": "FunctionDef name:filter2d_separable arg:input arg:kernel_x arg:kernel_y arg:border_type arg:normalized arg:padding arguments arg arg arg arg arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "CompiledFxGraphLoadable", + "source_code": "@dataclass\nclass CompiledFxGraphLoadable(InductorOutput[CompiledFxGraph]):\n result: CompiledFxGraph\n\n def pre_save(self) -> None:\n disk_compiled_graph = copy(self.result)\n disk_compiled_graph.prepare_for_serialization()\n self.result = disk_compiled_graph\n return\n\n def load(self, example_inputs) -> CompiledFxGraph:\n self.example_inputs = example_inputs\n return self.result\n\n def post_compile(self, result: CompiledFxGraph, fx_config: _CompileFxKwargs) -> CompiledFxGraph:\n constants = CompiledFxGraphConstants()\n graph, cache_info = FxGraphCache.cache_hit_post_compile(result, {}, constants)\n if graph is None:\n raise BypassAOTAutogradCache('Failed to reload cache entry from disk')\n torch._logging.trace_structured('artifact', metadata_fn=lambda: {'name': 'fx_graph_bundled_cache_hit', 'encoding': 'json'}, payload_fn=lambda: json.dumps(cache_info))\n counters['inductor']['fxgraph_cache_hit'] += 1\n graph.post_compile(self.example_inputs, constants, fx_config)\n return graph", + "docstring": "A full compiled fx graph that doesn't need to lookup the FxGraphCache to run", + "type": "class", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py", + "ast_data": "ClassDef name:CompiledFxGraphLoadable FunctionDef name:pre_save arg:self arguments 
arg Assign Call Call Assign Return return:no FunctionDef name:load arg:self arg:example_inputs arguments arg arg Assign Return return:yes FunctionDef name:post_compile arg:self arg:result arg:fx_config arguments arg arg arg Assign Call Assign Call If Compare Raise Call Call arguments arguments Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_r2rn", + "source_code": "def _r2rn(forward, transform, x, type=2, s=None, axes=None, norm=None, overwrite_x=False, workers=None, orthogonalize=None):\n tmp = _asfarray(x)\n shape, axes = _init_nd_shape_and_axes(tmp, s, axes)\n overwrite_x = overwrite_x or _datacopied(tmp, x)\n if len(axes) == 0:\n return x\n tmp, copied = _fix_shape(tmp, shape, axes)\n overwrite_x = overwrite_x or copied\n if not forward:\n if type == 2:\n type = 3\n elif type == 3:\n type = 2\n norm = _normalization(norm, forward)\n workers = _workers(workers)\n out = tmp if overwrite_x else None\n if np.iscomplexobj(x):\n out = np.empty_like(tmp) if out is None else out\n transform(tmp.real, type, axes, norm, out.real, workers)\n transform(tmp.imag, type, axes, norm, out.imag, workers)\n return out\n return transform(tmp, type, axes, norm, out, workers, orthogonalize)", + "docstring": "Forward or backward nd DCT/DST Parameters ---------- forward : bool Transform direction (determines type and normalisation) transform : {pypocketfft.dct, pypocketfft.dst} The transform to perform", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_pocketfft\\realtransforms.py", + "ast_data": "FunctionDef name:_r2rn arg:forward arg:transform arg:x arg:type arg:s arg:axes arg:norm arg:overwrite_x arg:workers arg:orthogonalize arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign BoolOp Call If Compare Call Return return:yes Assign Call Assign BoolOp If If Compare Assign If Compare Assign Assign Call Assign Call Assign If Call Assign Compare Call Call Call Return return:yes Return return:yes Call" + }, + { + "library": "scipy", + "name": "find_active_constraints", + "source_code": "def find_active_constraints(x, lb, ub, rtol=1e-10):\n active = np.zeros_like(x, dtype=int)\n if rtol == 0:\n active[x <= lb] = -1\n active[x >= ub] = 1\n return active\n lower_dist = x - lb\n upper_dist = ub - x\n lower_threshold = rtol * np.maximum(1, np.abs(lb))\n upper_threshold = rtol * np.maximum(1, np.abs(ub))\n lower_active = np.isfinite(lb) & (lower_dist <= np.minimum(upper_dist, lower_threshold))\n active[lower_active] = -1\n upper_active = np.isfinite(ub) & (upper_dist <= np.minimum(lower_dist, upper_threshold))\n active[upper_active] = 1\n return active", + "docstring": "Determine which constraints are active in a given point. The threshold is computed using and the absolute value of the closest bound. Returns ------- active : ndarray of int with shape of x Each component shows whether the corresponding constraint is active: * 0 - a constraint is not active. * -1 - a lower bound is active. 
* 1 - a upper bound is active.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:find_active_constraints arg:x arg:lb arg:ub arg:rtol arguments arg arg arg arg Assign Call If Compare Assign Compare Assign Compare Return return:yes Assign Assign Assign Call Call Assign Call Call Assign Call Compare Call Assign Assign Call Compare Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "_ensure_c_contiguous", + "source_code": "def _ensure_c_contiguous(self):\n if not self.x.flags.c_contiguous:\n self.x = self.x.copy()\n if not self.c.flags.c_contiguous:\n self.c = self.c.copy()", + "docstring": "c and x may be modified by the user. The Cython code expects that they are C contiguous.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", + "ast_data": "FunctionDef name:_ensure_c_contiguous arg:self arguments arg If Assign Call If Assign Call" + }, + { + "library": "numpy", + "name": "MGridClass", + "source_code": "class MGridClass(nd_grid):\n __slots__ = ()\n\n def __init__(self):\n super().__init__(sparse=False)", + "docstring": "An instance which returns a dense multi-dimensional \"meshgrid\". An instance which returns a dense (or fleshed out) mesh-grid when indexed, so that each returned argument has the same shape. The dimensions and number of the output arrays are equal to the number of indexing dimensions. If the step length is not a complex number, then the stop is not inclusive. However, if the step length is a **complex number** (e.g. 5j), then the integer part of its magnitude is interpreted as specifying the number of points to create between the start and stop values, where the stop value **is inclusive**. Returns ------- mesh-grid : ndarray A single array, containing a set of \\ s all of the same dimensions. stacked along the first axis. See Also -------- ogrid : like but returns open (not fleshed out) mesh grids meshgrid: return coordinate matrices from coordinate vectors r_ : array concatenator :ref: Examples -------- >>> import numpy as np >>> np.mgrid[0:5, 0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]) >>> np.mgrid[-1:1:5j] array([-1. , -0.5, 0. , 0.5, 1. 
]) >>> np.mgrid[0:4].shape (4,) >>> np.mgrid[0:4, 0:5].shape (2, 4, 5) >>> np.mgrid[0:4, 0:5, 0:6].shape (3, 4, 5, 6)", + "type": "class", + "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py", + "ast_data": "ClassDef name:MGridClass Assign FunctionDef name:__init__ arg:self arguments arg Call Call" + }, + { + "library": "pytorch", + "name": "_InfiniteConstantSampler", + "source_code": "class _InfiniteConstantSampler(Sampler):\n\n def __iter__(self):\n while True:\n yield None", + "docstring": "Analogous to `~torch.utils.data.IterableDataset`.", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\dataloader.py", + "ast_data": "ClassDef name:_InfiniteConstantSampler FunctionDef name:__iter__ arg:self arguments arg While" + }, + { + "library": "django", + "name": "generate_added_fields", + "source_code": "def generate_added_fields(self):\n for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):\n self._generate_added_field(app_label, model_name, field_name)", + "docstring": "Make AddField operations.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\autodetector.py", + "ast_data": "FunctionDef name:generate_added_fields arg:self arguments arg For Call Call" + }, + { + "library": "django", + "name": "media", + "source_code": "def media(request):\n return {'MEDIA_URL': settings.MEDIA_URL}", + "docstring": "Add media-related context variables to the context.", + "type": "function", + "file_path": "django\\django\\template\\context_processors.py", + "ast_data": "FunctionDef name:media arg:request arguments arg Return return:yes" + }, + { + "library": "django", + "name": "geom_count", + "source_code": "@property\ndef geom_count(self):\n return capi.get_geom_count(self.ptr)", + "docstring": "Return the number of elements in this Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:geom_count arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_devrelease", + "source_code": "@property\ndef is_devrelease(self) -> bool:\n return self.dev is not None", + "docstring": "Whether this version is a development release. >>> Version(\"1.2.3\").is_devrelease False >>> Version(\"1.2.3.dev1\").is_devrelease True", + "type": "method", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "FunctionDef name:is_devrelease arg:self arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "executions", + "source_code": "def executions(self, digest=False, begin=None, end=None):\n digests = self._execution_digests\n if begin is not None or end is not None:\n begin = begin or 0\n end = end or len(digests)\n digests = digests[begin:end]\n if digest:\n return digests\n else:\n return [self.read_execution(digest) for digest in digests]", + "docstring": "Get s or s this reader has read so far. Args: digest: Whether the results are returned in a digest form, i.e., format, instead of the more detailed format. begin: Optional beginning index for the requested execution data objects or their digests. Python-style negative indices are supported. end: Optional ending index for the requested execution data objects or their digests. Python-style negative indices are supported. Returns: If : a of objects. 
Else: a of objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:executions arg:self arg:digest arg:begin arg:end arguments arg arg arg arg Assign If BoolOp Compare Compare Assign BoolOp Assign BoolOp Call Assign If Return return:yes Return return:yes Call" + }, + { + "library": "authlib", + "name": "headers", + "source_code": "@property\ndef headers(self):\n if self.type == 'json':\n return self['header']", + "docstring": "Alias of `` for JSON typed JWS.", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7515\\models.py", + "ast_data": "FunctionDef name:headers arg:self arguments arg If Compare Return return:yes" + }, + { + "library": "django", + "name": "add_annotation", + "source_code": "def add_annotation(self, annotation, alias, select=True):\n self.check_alias(alias)\n annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None)\n if select:\n self.append_annotation_mask([alias])\n else:\n self.set_annotation_mask(set(self.annotation_select).difference({alias}))\n self.annotations[alias] = annotation\n if select and self.selected:\n self.selected[alias] = alias", + "docstring": "Add a single annotation expression to the Query.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:add_annotation arg:self arg:annotation arg:alias arg:select arguments arg arg arg arg Call Assign Call If Call Call Call Call Assign If BoolOp Assign" + }, + { + "library": "django", + "name": "SuspiciousOperation", + "source_code": "class SuspiciousOperation(Exception):\n pass", + "docstring": "The user did something suspicious", + "type": "class", + "file_path": "django\\django\\core\\exceptions.py", + "ast_data": "ClassDef name:SuspiciousOperation" + }, + { + "library": "tensorflow", + "name": "activity_regularizer", + "source_code": "@activity_regularizer.setter\ndef activity_regularizer(self, regularizer):\n self._activity_regularizer = regularizer", + "docstring": "Optional regularizer function for the output of this layer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:activity_regularizer arg:self arg:regularizer arguments arg arg Assign" + }, + { + "library": "sphinx", + "name": "FiletypeNotFoundError", + "source_code": "class FiletypeNotFoundError(Exception):\n pass", + "docstring": "Raised by get_filetype() if a filename matches no source suffix.", + "type": "class", + "file_path": "sphinx\\sphinx\\errors.py", + "ast_data": "ClassDef name:FiletypeNotFoundError" + }, + { + "library": "tensorflow", + "name": "calc_control_outputs", + "source_code": "def calc_control_outputs(self, graph):\n control_outputs = {}\n for op in graph.get_operations():\n for control_input in op.control_inputs:\n if control_input not in control_outputs:\n control_outputs[control_input] = set()\n control_outputs[control_input].add(op)\n return control_outputs", + "docstring": "Returns the map of control_outputs for a given graph. Args: graph: The graph to parse. 
Returns: A map of the control outputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py", + "ast_data": "FunctionDef name:calc_control_outputs arg:self arg:graph arguments arg arg Assign For Call For If Compare Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_layouts_same", + "source_code": "def is_layouts_same(self, embedding_layouts) -> bool:\n if self._checkpoint_layouts.keys() != embedding_layouts.keys():\n raise ValueError('Layouts in checkpoint and embedding must have the same keys. found {} and {}'.format(self._checkpoint_layouts.keys(), embedding_layouts.keys()))\n for key, layout in self._checkpoint_layouts.items():\n if not compare.ProtoEq(layout, embedding_layouts[key]):\n logging.info('Layouts do not match for %s this will require resharding; %s vs %s', key, layout, embedding_layouts[key])\n return False\n return True", + "docstring": "Returns True if the all the embedding and checkpoint layouts are the same. Args: embedding_layouts: dict of layouts for embedding tables. Raises: ValueError if the embedding layouts and checkpoint layouts do not have the same keys. Returns: Bool representing if the embedding layouts match the layouts in checkpoint.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py", + "ast_data": "FunctionDef name:is_layouts_same arg:self arg:embedding_layouts arguments arg arg If Compare Call Call Raise Call Call Call Call For Call If Call Call Return return:yes Return return:yes" + }, + { + "library": "sphinx", + "name": "transform_content", + "source_code": "def transform_content(self, content_node: addnodes.desc_content) -> None:\n field_list = nodes.field_list()\n if 'type' in self.options:\n field, msgs = self.format_type(self.options['type'])\n field_list.append(field)\n field_list += msgs\n if 'default' in self.options:\n field, msgs = self.format_default(self.options['default'])\n field_list.append(field)\n field_list += msgs\n if len(field_list.children) > 0:\n content_node.insert(0, field_list)", + "docstring": "Insert *type* and *default* as a field list.", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py", + "ast_data": "FunctionDef name:transform_content arg:self arg:content_node arguments arg arg Assign Call If Compare Assign Call Call If Compare Assign Call Call If Compare Call Call" + }, + { + "library": "scipy", + "name": "interval_censored", + "source_code": "@classmethod\ndef interval_censored(cls, low, high):\n _validate_1d(low, 'low', allow_inf=True)\n _validate_1d(high, 'high', allow_inf=True)\n if len(low) != len(high):\n raise ValueError('`low` and `high` must have the same length.')\n interval = np.column_stack((low, high))\n uncensored, left, right, interval = _validate_interval(interval)\n return cls(uncensored=uncensored, left=left, right=right, interval=interval)", + "docstring": "Create a instance of interval-censored data. This method is useful when all the data is interval-censored, and the low and high ends of the intervals are already stored in separate one-dimensional arrays. Parameters ---------- low : array_like The one-dimensional array containing the low ends of the intervals. high : array_like The one-dimensional array containing the high ends of the intervals. Returns ------- data : An instance of that represents the collection of censored values. 
Examples -------- >>> import numpy as np >>> from scipy.stats import CensoredData `` are the low and high ends of a collection of interval-censored values. >>> a = [0.5, 2.0, 3.0, 5.5] >>> b = [1.0, 2.5, 3.5, 7.0] >>> data = CensoredData.interval_censored(low=a, high=b) >>> print(data) CensoredData(4 values: 0 not censored, 4 interval-censored)", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_censored_data.py", + "ast_data": "FunctionDef name:interval_censored arg:cls arg:low arg:high arguments arg arg arg Call Call If Compare Call Call Raise Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "num_buckets", + "source_code": "@property\ndef num_buckets(self):\n return self.categorical_column.num_buckets", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:num_buckets arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "toolmanager_disconnect", + "source_code": "def toolmanager_disconnect(self, cid):\n return self._callbacks.disconnect(cid)", + "docstring": "Disconnect callback id *cid*. Example usage:: cid = toolmanager.toolmanager_connect('tool_trigger_zoom', onpress) #...later toolmanager.toolmanager_disconnect(cid)", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", + "ast_data": "FunctionDef name:toolmanager_disconnect arg:self arg:cid arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "OneToOneRel", + "source_code": "class OneToOneRel(ManyToOneRel):\n\n def __init__(self, field, to, field_name, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, on_delete=None):\n super().__init__(field, to, field_name, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete)\n self.multiple = False", + "docstring": "Used by OneToOneField to store information about the relation. `` returns this class to provide access to the field flags for the reverse relation.", + "type": "class", + "file_path": "django\\django\\db\\models\\fields\\reverse_related.py", + "ast_data": "ClassDef name:OneToOneRel FunctionDef name:__init__ arg:self arg:field arg:to arg:field_name arg:related_name arg:related_query_name arg:limit_choices_to arg:parent_link arg:on_delete arguments arg arg arg arg arg arg arg arg arg Call Call Assign" + }, + { + "library": "kornia", + "name": "to_onnx", + "source_code": "def to_onnx(self, onnx_name: Optional[str]=None, image_size: Optional[int]=352, include_pre_and_post_processor: bool=True, save: bool=True, additional_metadata: Optional[list[tuple[str, str]]]=None, **kwargs: Any) -> onnx.ModelProto:\n if onnx_name is None:\n onnx_name = f'kornia_{self.name}_{image_size}.onnx'\n return super().to_onnx(onnx_name, input_shape=[-1, 3, image_size or -1, image_size or -1], output_shape=[-1, 1, image_size or -1, image_size or -1], pseudo_shape=[1, 3, image_size or 352, image_size or 352], model=self if include_pre_and_post_processor else self.model, save=save, additional_metadata=additional_metadata, **kwargs)", + "docstring": "Export the current edge detection model to an ONNX model file. Args: onnx_name: The name of the output ONNX file. If not provided, a default name in the format \"Kornia-.onnx\" will be used. image_size: The size to which input images will be resized during preprocessing. 
If None, image_size will be dynamic. For DexiNed, recommended scale is 352. include_pre_and_post_processor: Whether to include the pre-processor and post-processor in the exported model. save: If to save the model or load it. additional_metadata: Additional metadata to add to the ONNX model. kwargs: Additional arguments to convert to onnx.", + "type": "method", + "file_path": "kornia\\kornia\\models\\edge_detection\\base.py", + "ast_data": "FunctionDef name:to_onnx arg:self arg:onnx_name arg:image_size arg:include_pre_and_post_processor arg:save arg:additional_metadata arguments arg arg arg arg arg arg arg If Compare Assign Return return:yes Call Call BoolOp BoolOp BoolOp BoolOp BoolOp BoolOp" + }, + { + "library": "cryptography", + "name": "SignatureAlgorithm", + "source_code": "class SignatureAlgorithm(utils.Enum):\n ANONYMOUS = 0\n RSA = 1\n DSA = 2\n ECDSA = 3", + "docstring": "Signature algorithms that are valid for SCTs. These are exactly the same as SignatureAlgorithm in RFC 5246 (TLS 1.2). See:", + "type": "class", + "file_path": "cryptography\\src\\cryptography\\x509\\certificate_transparency.py", + "ast_data": "ClassDef name:SignatureAlgorithm Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "reset_state", + "source_code": "def reset_state(self):\n if not self._built:\n return\n metrics = [self._loss_metric] + nest.flatten(self._per_output_metrics)\n for metric_obj in metrics:\n if metric_obj is not None:\n metric_obj.reset_state()", + "docstring": "Resets the state of loss metrics.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "FunctionDef name:reset_state arg:self arguments arg If Return return:no Assign Call For If Compare Call" + }, + { + "library": "tensorflow", + "name": "is_composite_or_composite_value", + "source_code": "def is_composite_or_composite_value(tensor):\n return isinstance(tensor, (composite_tensor.CompositeTensor, sparse_tensor.SparseTensorValue, ragged_tensor_value.RaggedTensorValue))", + "docstring": "Returns true if 'tensor' is a CompositeTensor or a CT Value object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:is_composite_or_composite_value arg:tensor arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_matches_version", + "source_code": "def _matches_version(actual_version, required_version):\n if actual_version is None:\n return False\n actual_version = actual_version.strip()\n required_version = required_version.strip()\n return actual_version.startswith(required_version)", + "docstring": "Checks whether some version meets the requirements. All elements of the required_version need to be present in the actual_version. required_version actual_version result ----------------------------------------- 1 1.1 True 1.2 1 False 1.2 1.3 False 1 True Args: required_version: The version specified by the user. actual_version: The version detected from the CUDA installation. 
Returns: Whether the actual version matches the required one.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py", + "ast_data": "FunctionDef name:_matches_version arg:actual_version arg:required_version arguments arg arg If Compare Return return:yes Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "RegistrationHandle", + "source_code": "class RegistrationHandle:\n\n def __init__(self, on_destroy: Callable):\n self._on_destroy = on_destroy\n\n def destroy(self) -> None:\n self._on_destroy()", + "docstring": "Does something when someone calls .destroy() on it", + "type": "class", + "file_path": "pytorch\\torch\\_library\\utils.py", + "ast_data": "ClassDef name:RegistrationHandle FunctionDef name:__init__ arg:self arg:on_destroy arguments arg arg Assign FunctionDef name:destroy arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "_LoopCondGrad", + "source_code": "@ops.RegisterGradient('LoopCond')\ndef _LoopCondGrad(_):\n return None", + "docstring": "Stop backprop for the predicate of a while loop.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_grad.py", + "ast_data": "FunctionDef name:_LoopCondGrad arg:_ arguments arg Return return:no Call" + }, + { + "library": "pandas", + "name": "_validate_scalar", + "source_code": "def _validate_scalar(self, fill_value):\n if is_valid_na_for_dtype(fill_value, self.categories.dtype):\n fill_value = -1\n elif fill_value in self.categories:\n fill_value = self._unbox_scalar(fill_value)\n else:\n raise TypeError(f'Cannot setitem on a Categorical with a new category ({fill_value}), set the categories first') from None\n return fill_value", + "docstring": "Convert a user-facing fill_value to a representation to use with our underlying ndarray, raising TypeError if this is not possible. Parameters ---------- fill_value : object Returns ------- fill_value : int Raises ------ TypeError", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:_validate_scalar arg:self arg:fill_value arguments arg arg If Call Assign If Compare Assign Call Raise Call Return return:yes" + }, + { + "library": "numpy", + "name": "filled", + "source_code": "def filled(self, fill_value=None):\n return asarray(self).filled(fill_value)[()]", + "docstring": "Return a copy with masked fields filled with a given value. Parameters ---------- fill_value : array_like, optional The value to use for invalid entries. Can be scalar or non-scalar. If latter is the case, the filled array should be broadcastable over input array. Default is None, in which case the attribute is used instead. 
Returns ------- filled_void A object See Also -------- MaskedArray.filled", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:filled arg:self arg:fill_value arguments arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "trigger", + "source_code": "def trigger(self, sender, event, data=None):\n if not self.figure.canvas.widgetlock.available(sender):\n return\n if data is not None:\n self.draw_rubberband(*data)\n else:\n self.remove_rubberband()", + "docstring": "Call or based on data.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg If Call Return return:no If Compare Call Call" + }, + { + "library": "scipy", + "name": "get_id", + "source_code": "def get_id(self):\n return self.id", + "docstring": "The identifier of the target node. For `ii`. Returns ------- id : int The identifier of the target node.", + "type": "method", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:get_id arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "run_5point", + "source_code": "def run_5point(points1: torch.Tensor, points2: torch.Tensor, weights: Optional[torch.Tensor]=None) -> torch.Tensor:\n KORNIA_CHECK_SHAPE(points1, ['B', 'N', '2'])\n KORNIA_CHECK_SAME_SHAPE(points1, points2)\n KORNIA_CHECK(points1.shape[1] >= 5, 'Number of points should be >=5')\n if weights is not None:\n KORNIA_CHECK_SAME_SHAPE(points1[:, :, 0], weights)\n batch_size, _, _ = points1.shape\n x1, y1 = torch.chunk(points1, dim=-1, chunks=2)\n x2, y2 = torch.chunk(points2, dim=-1, chunks=2)\n ones = ones_like(x1)\n X = torch.cat([x1 * x2, x1 * y2, x1, y1 * x2, y1 * y2, y1, x2, y2, ones], dim=-1)\n if weights is None:\n X = X.transpose(-2, -1) @ X\n else:\n w_diag = torch.diag_embed(weights)\n X = X.transpose(-2, -1) @ w_diag @ X\n E_Nister = null_to_Nister_solution(X, batch_size)\n return E_Nister", + "docstring": "Compute the essential matrix using the 5-point algorithm from Nister. The linear system is solved by Nister's 5-point algorithm [@nister2004efficient], and the solver implemented referred to [@barath2020magsac++][@wei2023generalized][@wang2023vggsfm]. Args: points1: A set of carlibrated points in the first image with a tensor shape :math:. points2: A set of points in the second image with a tensor shape :math:. weights: Tensor containing the weights per point correspondence with a shape of :math:. 
Returns: the computed essential matrix with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\essential.py", + "ast_data": "FunctionDef name:run_5point arg:points1 arg:points2 arg:weights arguments arg arg arg Call Call Call Compare If Compare Call Assign Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_nccl_available", + "source_code": "def is_nccl_available() -> bool:\n return _NCCL_AVAILABLE", + "docstring": "Check if the NCCL backend is available.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:is_nccl_available arguments Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, xy, width, height, *, angle=0.0, rotation_point='xy', **kwargs):\n super().__init__(**kwargs)\n self._x0 = xy[0]\n self._y0 = xy[1]\n self._width = width\n self._height = height\n self.angle = float(angle)\n self.rotation_point = rotation_point\n self._aspect_ratio_correction = 1.0\n self._convert_units()", + "docstring": "Parameters ---------- xy : (float, float) The anchor point. width : float Rectangle width. height : float Rectangle height. angle : float, default: 0 Rotation in degrees anti-clockwise about the rotation point. rotation_point : {'xy', 'center', (number, number)}, default: 'xy' If `~matplotlib.patches.Patch` properties %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:width arg:height arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Call Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "LossFunctionWrapper", + "source_code": "class LossFunctionWrapper(Loss):\n\n def __init__(self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs):\n super().__init__(reduction=reduction, name=name)\n self.fn = fn\n self._fn_kwargs = kwargs\n\n def call(self, y_true, y_pred):\n if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true):\n y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())\n return ag_fn(y_true, y_pred, **self._fn_kwargs)\n\n def get_config(self):\n config = {}\n for k, v in self._fn_kwargs.items():\n config[k] = backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))", + "docstring": "Wraps a loss function in the class.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "ClassDef name:LossFunctionWrapper FunctionDef name:__init__ arg:self arg:fn arg:reduction arg:name arguments arg arg arg arg arg Call Call Assign Assign FunctionDef name:call arg:self arg:y_true arg:y_pred arguments arg arg arg If BoolOp Call Call Assign Call Assign Call Call Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign For Call Assign Call Call Assign Call Call Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "read_value", + "source_code": "def read_value(self):\n raise NotImplementedError", + "docstring": "Returns the value of this variable, read in the current context. 
Can be different from value() if it's on another device, with control dependencies, etc. Returns: A containing the value of the variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:read_value arg:self arguments arg Raise" + }, + { + "library": "matplotlib", + "name": "ToolYScale", + "source_code": "class ToolYScale(AxisScaleBase):\n description = 'Toggle scale Y axis'\n default_keymap = property(lambda self: mpl.rcParams['keymap.yscale'])\n\n def set_scale(self, ax, scale):\n ax.set_yscale(scale)", + "docstring": "Tool to toggle between linear and logarithmic scales on the Y axis.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "ClassDef name:ToolYScale Assign Assign Call arguments arg FunctionDef name:set_scale arg:self arg:ax arg:scale arguments arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "solve", + "source_code": "def solve(self, rhs, adjoint=False, adjoint_arg=False, name='solve'):\n if self.is_non_singular is False:\n raise NotImplementedError('Exact solve not implemented for an operator that is expected to be singular.')\n if self.is_square is False:\n raise NotImplementedError('Exact solve not implemented for an operator that is expected to not be square.')\n if isinstance(rhs, LinearOperator):\n left_operator = self.adjoint() if adjoint else self\n right_operator = rhs.adjoint() if adjoint_arg else rhs\n if right_operator.range_dimension is not None and left_operator.domain_dimension is not None and (right_operator.range_dimension != left_operator.domain_dimension):\n raise ValueError('Operators are incompatible. Expected `rhs` to have dimension {} but got {}.'.format(left_operator.domain_dimension, right_operator.range_dimension))\n with self._name_scope(name):\n return self._linop_solve(left_operator, right_operator)\n with self._name_scope(name):\n rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(rhs, name='rhs')\n self._check_input_dtype(rhs)\n self_dim = -1 if adjoint else -2\n arg_dim = -1 if adjoint_arg else -2\n tensor_shape.dimension_at_index(self.shape, self_dim).assert_is_compatible_with(rhs.shape[arg_dim])\n return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)", + "docstring": "Solve (exact or approx) (batch) systems of equations: . The returned will be close to an exact solution if is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: Args: rhs: with same as this operator and compatible shape. is treated like a [batch] matrix meaning for every set of leading dimensions, the last two dimensions defines a matrix. See class docstring for definition of compatibility. adjoint: Python . If , solve the system involving the adjoint of this : . adjoint_arg: Python . If , solve where is the hermitian transpose (transposition and complex conjugation). name: A name scope to use for ops added by this method. Returns: with shape and same as . 
Raises: NotImplementedError: If or is False.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:solve arg:self arg:rhs arg:adjoint arg:adjoint_arg arg:name arguments arg arg arg arg arg If Compare Raise Call If Compare Raise Call If Call Assign Call Assign Call If BoolOp Compare Compare Compare Raise Call Call With Call Return return:yes Call With Call Assign Call Call Assign Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_export_to_saved_model_graph", + "source_code": "def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):\n _, _, _ = (object_map, tensor_map, options)\n del kwargs\n return []", + "docstring": "Creates a copy of this object's tensors onto SavedModel graph. Needs to be overridden if the class contains tensors that must be saved into the graph. This method should update the and dictionaries. This method is called on all nodes in the Trackable Graph (generated by ). The nodes are traversed in the order defined by All usages of _map_resources should be migrated to this method. Args: object_map: A dictionary that maps original Trackables to the copied Trackables. This only needs to be updated if the object is a tf.function, or if the copied tensors are necessary for checkpointing this object. tensor_map: Dictionary mapping original tensors to copied tensors. options: A object. **kwargs: Additional kwargs that may be added at a later time. Returns: Flat list of original tensors that have been copied.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py", + "ast_data": "FunctionDef name:_export_to_saved_model_graph arg:self arg:object_map arg:tensor_map arg:options arguments arg arg arg arg arg Assign Return return:no" + }, + { + "library": "pandas", + "name": "_fill_limit_area_1d", + "source_code": "def _fill_limit_area_1d(mask: npt.NDArray[np.bool_], limit_area: Literal['outside', 'inside']) -> None:\n neg_mask = ~mask\n first = neg_mask.argmax()\n last = len(neg_mask) - neg_mask[::-1].argmax() - 1\n if limit_area == 'inside':\n mask[:first] = False\n mask[last + 1:] = False\n elif limit_area == 'outside':\n mask[first + 1:last] = False", + "docstring": "Prepare 1d mask for ffill/bfill with limit_area. Caller is responsible for checking at least one value of mask is False. When called, mask will no longer faithfully represent when the corresponding are NA or not. Parameters ---------- mask : np.ndarray[bool, ndim=1] Mask representing NA values when filling. 
limit_area : { \"outside\", \"inside\" } Whether to limit filling to outside or inside the outer most non-NA value.", + "type": "function", + "file_path": "pandas\\pandas\\core\\missing.py", + "ast_data": "FunctionDef name:_fill_limit_area_1d arg:mask arg:limit_area arguments arg arg Assign Assign Call Assign Call Call If Compare Assign Assign If Compare Assign" + }, + { + "library": "scipy", + "name": "_params_dunnett", + "source_code": "def _params_dunnett(samples: list[np.ndarray], control: np.ndarray) -> tuple[np.ndarray, int, int, np.ndarray, int]:\n n_samples = np.array([sample.size for sample in samples])\n n_sample = n_samples.sum()\n n_control = control.size\n n = n_sample + n_control\n n_groups = len(samples)\n df = n - n_groups - 1\n rho = n_control / n_samples + 1\n rho = 1 / np.sqrt(rho[:, None] * rho[None, :])\n np.fill_diagonal(rho, 1)\n return (rho, df, n_groups, n_samples, n_control)", + "docstring": "Specific parameters for Dunnett's test. Degree of freedom is the number of observations minus the number of groups including the control.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_multicomp.py", + "ast_data": "FunctionDef name:_params_dunnett arg:samples arg:control arguments arg arg Assign Call Assign Call Assign Assign Assign Call Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "find_result_type", + "source_code": "def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj:\n new_dtype: DtypeObj\n if isinstance(left_dtype, np.dtype) and left_dtype.kind in 'iuc' and (lib.is_integer(right) or lib.is_float(right)):\n if lib.is_float(right) and right.is_integer() and (left_dtype.kind != 'f'):\n right = int(right)\n if isinstance(right, int) and (not isinstance(right, np.integer)):\n right_dtype = np.min_scalar_type(right)\n if right == 0:\n right = left_dtype\n elif not np.issubdtype(left_dtype, np.unsignedinteger) and 0 < right <= np.iinfo(right_dtype).max:\n right = np.dtype(f'i{right_dtype.itemsize}')\n else:\n right = right_dtype\n new_dtype = np.result_type(left_dtype, right)\n elif is_valid_na_for_dtype(right, left_dtype):\n new_dtype = ensure_dtype_can_hold_na(left_dtype)\n else:\n dtype, _ = infer_dtype_from(right)\n new_dtype = find_common_type([left_dtype, dtype])\n return new_dtype", + "docstring": "Find the type/dtype for the result of an operation between objects. This is similar to find_common_type, but looks at the right object instead of just its dtype. This can be useful in particular when the right object does not have a . 
Parameters ---------- left_dtype : np.dtype or ExtensionDtype right : Any Returns ------- np.dtype or ExtensionDtype See also -------- find_common_type numpy.result_type", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\cast.py", + "ast_data": "FunctionDef name:find_result_type arg:left_dtype arg:right arguments arg arg If BoolOp Call Compare BoolOp Call Call If BoolOp Call Call Compare Assign Call If BoolOp Call Call Assign Call If Compare Assign If BoolOp Call Compare Call Assign Call Assign Assign Call If Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "Schaffer03", + "source_code": "class Schaffer03(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n self.custom_bounds = [(-10, 10), (-10, 10)]\n self.global_optimum = [[0.0, 1.253115]]\n self.fglob = 0.00156685\n\n def fun(self, x, *args):\n self.nfev += 1\n num = sin(cos(abs(x[0] ** 2 - x[1] ** 2))) ** 2 - 0.5\n den = (1 + 0.001 * (x[0] ** 2 + x[1] ** 2)) ** 2\n return 0.5 + num / den", + "docstring": "Schaffer 3 objective function. This class defines the Schaffer 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schaffer03}}(x) = 0.5 + \\frac{\\sin^2 \\left( \\cos \\lvert x_1^2 - x_2^2 \\rvert \\right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Some new test functions for global optimization and performance of repulsive particle swarm method. Munich Personal RePEc Archive, 2006, 2718", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:Schaffer03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_expand_variables", + "source_code": "def _expand_variables(input_str, cmake_vars):\n\n def replace(match):\n if match.group(1) in cmake_vars:\n return cmake_vars[match.group(1)]\n return ''\n return _CMAKE_ATVAR_REGEX.sub(replace, _CMAKE_VAR_REGEX.sub(replace, input_str))", + "docstring": "Expands ${VARIABLE}s and @VARIABLE@s in 'input_str', using dictionary 'cmake_vars'. Args: input_str: the string containing ${VARIABLE} or @VARIABLE@ expressions to expand. cmake_vars: a dictionary mapping variable names to their values. 
Returns: The expanded string.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\third_party\\llvm_openmp\\expand_cmake_vars.py", + "ast_data": "FunctionDef name:_expand_variables arg:input_str arg:cmake_vars arguments arg arg FunctionDef name:replace arg:match arguments arg If Compare Call Return return:yes Call Return return:yes Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_transform", + "source_code": "def get_transform(self):\n return self.aux_transform + self.ref_offset_transform + self.offset_transform", + "docstring": "Return the :class: applied to the children", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "randint64", + "source_code": "def randint64(self, seed: T, offset: T, low: T, high: T) -> T:\n raise NotImplementedError", + "docstring": "Computes inductor_prims.randint. offset has dtype int32.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:randint64 arg:self arg:seed arg:offset arg:low arg:high arguments arg arg arg arg arg Raise" + }, + { + "library": "tensorflow", + "name": "_init_ready_op", + "source_code": "def _init_ready_op(self, ready_op=USE_DEFAULT, ready_for_local_init_op=USE_DEFAULT):\n if ready_op is Supervisor.USE_DEFAULT:\n ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)\n if ready_op is None:\n ready_op = variables.report_uninitialized_variables()\n ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)\n self._ready_op = ready_op\n if ready_for_local_init_op is Supervisor.USE_DEFAULT:\n ready_for_local_init_op = self._get_first_op_from_collection(ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)\n self._ready_for_local_init_op = ready_for_local_init_op", + "docstring": "Initializes ready_op. Args: ready_op: to check if the model is initialized. If it's set to USE_DEFAULT, creates an op that checks all the variables are initialized. ready_for_local_init_op: to check if the model is ready to run local_init_op. 
If it's set to USE_DEFAULT, creates an op that checks all the global variables are initialized.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "FunctionDef name:_init_ready_op arg:self arg:ready_op arg:ready_for_local_init_op arguments arg arg arg If Compare Assign Call If Compare Assign Call Call Assign If Compare Assign Call Assign" + }, + { + "library": "django", + "name": "datetime_cast_date_sql", + "source_code": "def datetime_cast_date_sql(self, sql, params, tzname):\n raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date_sql() method.')", + "docstring": "Return the SQL to cast a datetime value to date value.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:datetime_cast_date_sql arg:self arg:sql arg:params arg:tzname arguments arg arg arg arg Raise Call" + }, + { + "library": "numpy", + "name": "_collapse", + "source_code": "def _collapse(self, axis):\n if axis is None:\n return self[0, 0]\n else:\n return self", + "docstring": "A convenience function for operations that want to collapse to a scalar like _align, but are using keepdims=True", + "type": "method", + "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py", + "ast_data": "FunctionDef name:_collapse arg:self arg:axis arguments arg arg If Compare Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "square", + "source_code": "def square(t, duty=0.5):\n t, w = (asarray(t), asarray(duty))\n w = asarray(w + (t - t))\n t = asarray(t + (w - w))\n y = zeros(t.shape, dtype='d')\n mask1 = (w > 1) | (w < 0)\n place(y, mask1, nan)\n tmod = mod(t, 2 * pi)\n mask2 = 1 - mask1 & (tmod < w * 2 * pi)\n place(y, mask2, 1)\n mask3 = 1 - mask1 & 1 - mask2\n place(y, mask3, -1)\n return y", + "docstring": "Return a periodic square-wave waveform. The square wave has a period `duty` must be in the interval [0,1]. Note that this is not band-limited. It produces an infinite number of harmonics, which are aliased back and forth across the frequency spectrum. Parameters ---------- t : array_like The input time array. duty : array_like, optional Duty cycle. Default is 0.5 (50% duty cycle). If an array, causes wave shape to change over time, and must be the same length as t. Returns ------- y : ndarray Output array containing the square waveform. 
Examples -------- A 5 Hz waveform sampled at 500 Hz for 1 second: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(0, 1, 500, endpoint=False) >>> plt.plot(t, signal.square(2 * np.pi * 5 * t)) >>> plt.ylim(-2, 2) A pulse-width modulated sine wave: >>> plt.figure() >>> sig = np.sin(2 * np.pi * t) >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2) >>> plt.subplot(2, 1, 1) >>> plt.plot(t, sig) >>> plt.subplot(2, 1, 2) >>> plt.plot(t, pwm) >>> plt.ylim(-1.5, 1.5)", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_waveforms.py", + "ast_data": "FunctionDef name:square arg:t arg:duty arguments arg arg Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare Call Assign Call Assign Compare Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "_gotitem", + "source_code": "def _gotitem(self, key, ndim, subset=None):\n if subset is None:\n subset = self.obj\n kwargs = {attr: getattr(self, attr) for attr in self._attributes}\n selection = self._infer_selection(key, subset)\n new_win = type(self)(subset, selection=selection, **kwargs)\n return new_win", + "docstring": "Sub-classes to define. Return a sliced object. Parameters ---------- key : str / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on", + "type": "method", + "file_path": "pandas\\pandas\\core\\window\\rolling.py", + "ast_data": "FunctionDef name:_gotitem arg:self arg:key arg:ndim arg:subset arguments arg arg arg arg If Compare Assign Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_orthogonal_kernel", + "source_code": "def _orthogonal_kernel(self, ksize, cin, cout):\n if cin > cout:\n raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')\n orth = self._orthogonal_matrix(cout)[0:cin, :]\n if ksize == 1:\n return array_ops.expand_dims(orth, 0)\n p = self._block_orth(self._symmetric_projection(cout))\n for _ in range(ksize - 2):\n temp = self._block_orth(self._symmetric_projection(cout))\n p = self._matrix_conv(p, temp)\n for i in range(ksize):\n p[i] = math_ops.matmul(orth, p[i])\n return self._dict_to_tensor(p, ksize)", + "docstring": "Construct orthogonal kernel for convolution. Args: ksize: Kernel size. cin: Number of input channels. cout: Number of output channels. Returns: An [ksize, ksize, cin, cout] orthogonal kernel. 
Raises: ValueError: If cin > cout.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:_orthogonal_kernel arg:self arg:ksize arg:cin arg:cout arguments arg arg arg arg If Compare Raise Call Assign Call If Compare Return return:yes Call Assign Call Call For Call Assign Call Call Assign Call For Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "PipeliningShapeError", + "source_code": "class PipeliningShapeError(RuntimeError):\n pass", + "docstring": "Shape mismatch between configured and runtime values.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\pipelining\\_utils.py", + "ast_data": "ClassDef name:PipeliningShapeError" + }, + { + "library": "scikit-learn", + "name": "_compute_score_samples", + "source_code": "def _compute_score_samples(self, X, subsample_features):\n n_samples = X.shape[0]\n depths = np.zeros(n_samples, order='f')\n average_path_length_max_samples = _average_path_length([self._max_samples])\n lock = threading.Lock()\n Parallel(verbose=self.verbose, require='sharedmem')((delayed(_parallel_compute_tree_depths)(tree, X, features if subsample_features else None, self._decision_path_lengths[tree_idx], self._average_path_length_per_tree[tree_idx], depths, lock) for tree_idx, (tree, features) in enumerate(zip(self.estimators_, self.estimators_features_))))\n denominator = len(self.estimators_) * average_path_length_max_samples\n scores = 2 ** (-np.divide(depths, denominator, out=np.ones_like(depths), where=denominator != 0))\n return scores", + "docstring": "Compute the score of each samples in X going through the extra trees. Parameters ---------- X : array-like or sparse matrix Data matrix. subsample_features : bool Whether features should be subsampled. Returns ------- scores : ndarray of shape (n_samples,) The score of each sample in X.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_iforest.py", + "ast_data": "FunctionDef name:_compute_score_samples arg:self arg:X arg:subsample_features arguments arg arg arg Assign Assign Call Assign Call Assign Call Call Call Call Call Call Call Assign Call Assign Call Call Compare Return return:yes" + }, + { + "library": "numpy", + "name": "getargspec", + "source_code": "def getargspec(func):\n if ismethod(func):\n func = func.__func__\n if not isfunction(func):\n raise TypeError('arg is not a Python function')\n args, varargs, varkw = getargs(func.__code__)\n return (args, varargs, varkw, func.__defaults__)", + "docstring": "Get the names and default values of a function's arguments. A tuple of four things is returned: (args, varargs, varkw, defaults). 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 
'defaults' is an n-tuple of the default values of the last n arguments.", + "type": "function", + "file_path": "numpy\\numpy\\_utils\\_inspect.py", + "ast_data": "FunctionDef name:getargspec arg:func arguments arg If Call Assign If Call Raise Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_execution_mode", + "source_code": "def set_execution_mode(mode):\n context().execution_mode = mode", + "docstring": "Sets execution mode for the current thread.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:set_execution_mode arg:mode arguments arg Assign Call" + }, + { + "library": "tensorflow", + "name": "target", + "source_code": "@property\ndef target(self) -> str:\n return '{0}://localhost:{1}'.format(self._config.protocol, self._server.bound_port())", + "docstring": "Returns a target that can be used to connect to the server. >>> dispatcher = tf.data.experimental.service.DispatchServer() >>> dataset = tf.data.Dataset.range(10) >>> dataset = dataset.apply(tf.data.experimental.service.distribute( ... processing_mode=\"parallel_epochs\", service=dispatcher.target)) The returned string will be in the form protocol://address, e.g. \"grpc://localhost:5050\".", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py", + "ast_data": "FunctionDef name:target arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_iset_split_block", + "source_code": "def _iset_split_block(self, blkno_l: int, blk_locs: np.ndarray | list[int], value: ArrayLike | None=None, refs: BlockValuesRefs | None=None) -> None:\n blk = self.blocks[blkno_l]\n if self._blklocs is None:\n self._rebuild_blknos_and_blklocs()\n nbs_tup = tuple(blk.delete(blk_locs))\n if value is not None:\n locs = blk.mgr_locs.as_array[blk_locs]\n first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs)\n else:\n first_nb = nbs_tup[0]\n nbs_tup = tuple(nbs_tup[1:])\n nr_blocks = len(self.blocks)\n blocks_tup = self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1:] + nbs_tup\n self.blocks = blocks_tup\n if not nbs_tup and value is not None:\n return\n self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb))\n for i, nb in enumerate(nbs_tup):\n self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))\n self._blknos[nb.mgr_locs.indexer] = i + nr_blocks", + "docstring": "Removes columns from a block by splitting the block. Avoids copying the whole block through slicing and updates the manager after determining the new block structure. Optionally adds a new block, otherwise has to be done by the caller. Parameters ---------- blkno_l: The block number to operate on, relevant for updating the manager blk_locs: The locations of our block that should be deleted. value: The value to set as a replacement. 
refs: The reference tracking object of the value to set.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:_iset_split_block arg:self arg:blkno_l arg:blk_locs arg:value arg:refs arguments arg arg arg arg arg Assign If Compare Call Assign Call Call If Compare Assign Assign Call Call Assign Assign Call Assign Call Assign Assign If BoolOp Compare Return return:no Assign Call Call For Call Assign Call Call Assign" + }, + { + "library": "tensorflow", + "name": "_get_or_create_assets_dir", + "source_code": "def _get_or_create_assets_dir(export_dir):\n assets_destination_dir = _get_assets_dir(export_dir)\n file_io.recursive_create_dir(assets_destination_dir)\n return assets_destination_dir", + "docstring": "Return assets sub-directory, or create one if it doesn't exist.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py", + "ast_data": "FunctionDef name:_get_or_create_assets_dir arg:export_dir arguments arg Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_asarray_validated", + "source_code": "def _asarray_validated(a, check_finite=True, sparse_ok=False, objects_ok=False, mask_ok=False, as_inexact=False):\n if not sparse_ok:\n if issparse(a):\n msg = 'Sparse arrays/matrices are not supported by this function. Perhaps one of the `scipy.sparse.linalg` functions would work instead.'\n raise ValueError(msg)\n if not mask_ok:\n if np.ma.isMaskedArray(a):\n raise ValueError('masked arrays are not supported')\n toarray = np.asarray_chkfinite if check_finite else np.asarray\n a = toarray(a)\n if not objects_ok:\n if a.dtype is np.dtype('O'):\n raise ValueError('object arrays are not supported')\n if as_inexact:\n if not np.issubdtype(a.dtype, np.inexact):\n a = toarray(a, dtype=np.float64)\n return a", + "docstring": "Helper function for SciPy argument validation. Many SciPy linear algebra functions do support arbitrary array-like input arguments. Examples of commonly unsupported inputs include matrices containing inf/nan, sparse matrix representations, and matrices with complicated elements. Parameters ---------- a : array_like The array-like input. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True sparse_ok : bool, optional True if scipy sparse matrices are allowed. objects_ok : bool, optional True if arrays with dype('O') are allowed. mask_ok : bool, optional True if masked arrays are allowed. as_inexact : bool, optional True to convert the input array to a np.inexact dtype. 
Returns ------- ret : ndarray The converted validated array.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_util.py", + "ast_data": "FunctionDef name:_asarray_validated arg:a arg:check_finite arg:sparse_ok arg:objects_ok arg:mask_ok arg:as_inexact arguments arg arg arg arg arg arg If If Call Assign Raise Call If If Call Raise Call Assign Assign Call If If Compare Call Raise Call If If Call Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "__call__", + "source_code": "def __call__(self, *args, **kwargs):\n return self.error(*args, **kwargs)", + "docstring": "Record an error log entry.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cplogging.py", + "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "map_and_batch", + "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.map(map_func, num_parallel_calls)` followed by `tf.data.Dataset.batch(batch_size, drop_remainder)`. Static tf.data optimizations will take care of using the fused implementation.')\n@tf_export('data.experimental.map_and_batch')\ndef map_and_batch(map_func, batch_size, num_parallel_batches=None, drop_remainder=False, num_parallel_calls=None):\n if num_parallel_batches is None and num_parallel_calls is None:\n num_parallel_calls = batch_size\n elif num_parallel_batches is not None and num_parallel_calls is None:\n num_parallel_calls = batch_size * num_parallel_batches\n elif num_parallel_batches is not None and num_parallel_calls is not None:\n raise ValueError(f'`map_and_batch` allows only one of `num_parallel_batches` and `num_parallel_calls` to be set, but `num_parallel_batches` was set to {num_parallel_batches} and `num_parallel_calls` as set to {num_parallel_calls}.')\n\n def _apply_fn(dataset):\n return _MapAndBatchDataset(dataset, map_func, batch_size, num_parallel_calls, drop_remainder)\n return _apply_fn", + "docstring": "Fused implementation of and . Maps across consecutive elements of this dataset and then combines them into a batch. Functionally, it is equivalent to followed by . This API is temporary and deprecated since input pipeline optimization now fuses consecutive and operations automatically. Args: map_func: A function mapping a nested structure of tensors to another nested structure of tensors. batch_size: A scalar , representing the number of consecutive elements of this dataset to combine in a single batch. num_parallel_batches: (Optional.) A scalar , representing the number of batches to create in parallel. On one hand, higher values can help mitigate the effect of stragglers. On the other hand, higher values can increase contention if CPU is scarce. drop_remainder: (Optional.) A scalar , representing whether the last batch should be dropped in case its size is smaller than desired; the default behavior is not to drop the smaller batch. num_parallel_calls: (Optional.) A scalar , representing the number of elements to process in parallel. If not specified, elements will be processed in parallel. If the value is used, then the number of parallel calls is set dynamically based on available CPU. Returns: A transformation function, which can be passed to . 
Raises: ValueError: If both and are specified.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\batching.py", + "ast_data": "FunctionDef name:map_and_batch arg:map_func arg:batch_size arg:num_parallel_batches arg:drop_remainder arg:num_parallel_calls arguments arg arg arg arg arg If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign If BoolOp Compare Compare Raise Call FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "conv3d_transpose_v2", + "source_code": "@tf_export('nn.conv3d_transpose', v1=[])\n@dispatch.add_dispatch_support\ndef conv3d_transpose_v2(input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None):\n with ops.name_scope(name, 'conv3d_transpose', [input, filter, output_shape]) as name:\n if data_format is None:\n data_format = 'NDHWC'\n channel_index = 1 if data_format.startswith('NC') else 4\n strides = _get_sequence(strides, 3, channel_index, 'strides')\n dilations = _get_sequence(dilations, 3, channel_index, 'dilations')\n return gen_nn_ops.conv3d_backprop_input_v2(input_sizes=output_shape, filter=filters, out_backprop=input, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name)", + "docstring": "The transpose of . This operation is sometimes called \"deconvolution\" after (Zeiler et al., 2010), but is really the transpose (gradient) of rather than an actual deconvolution. Args: input: A 5-D of type and shape for data format or for data format. filters: A 5-D with the same type as and shape . 's dimension must match that of . output_shape: A 1-D representing the output shape of the deconvolution op. strides: An int or list of that has length , or . The stride of the sliding window for each dimension of . If a single value is given it is replicated in the , and dimension. By default the and dimensions are set to 0. The dimension order is determined by the value of , see below for details. padding: A string, either or . The padding algorithm. See [here]( for more information. data_format: A string. 'NDHWC' and 'NCDHW' are supported. dilations: An int or list of that has length , or , defaults to 1. The dilation factor for each dimension of. If a single value is given it is replicated in the , and dimension. By default the and dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of , see above for details. Dilations in the batch and depth dimensions if a 5-d tensor must be 1. name: Optional name for the returned tensor. Returns: A with the same type as . 
References: Deconvolutional Networks: [Zeiler et al., 2010] ( ([pdf] (", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:conv3d_transpose_v2 arg:input arg:filters arg:output_shape arg:strides arg:padding arg:data_format arg:dilations arg:name arguments arg arg arg arg arg arg arg arg With Call If Compare Assign Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "nms", + "source_code": "def nms(signal: Tensor, window_size: int=5, cutoff: float=0.0) -> Tensor:\n if window_size % 2 != 1:\n raise ValueError(f'window_size has to be odd, got {window_size}')\n _, ixs = F.max_pool2d(signal, kernel_size=window_size, stride=1, padding=window_size // 2, return_indices=True)\n h, w = signal.shape[1:]\n coords = torch.arange(h * w, device=signal.device).reshape(1, h, w)\n nms = ixs == coords\n if cutoff is None:\n return nms\n else:\n return nms & (signal > cutoff)", + "docstring": "Apply non-maximum suppression.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\disk\\detector.py", + "ast_data": "FunctionDef name:nms arg:signal arg:window_size arg:cutoff arguments arg arg arg If Compare Raise Call Assign Call Assign Assign Call Call Assign Compare If Compare Return return:yes Return return:yes Compare" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n check_is_fitted(self, 'n_features_in_')\n input_features = _check_feature_names_in(self, input_features)\n prefix = self.__class__.__name__.lower()\n return np.asarray([f'{prefix}_{feature_name}' for feature_name in input_features[self.features_]], dtype=object)", + "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\impute\\_base.py", + "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "weights", + "source_code": "@property\ndef weights(self):\n return self.trainable_weights + self.non_trainable_weights", + "docstring": "Returns the list of all layer variables/weights. 
Returns: A list of variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:weights arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_IterDataPipeMeta", + "source_code": "class _IterDataPipeMeta(_DataPipeMeta):\n\n def __new__(cls, name, bases, namespace, **kwargs):\n if 'reset' in namespace:\n reset_func = namespace['reset']\n\n @functools.wraps(reset_func)\n def conditional_reset(*args, **kwargs):\n datapipe = args[0]\n if datapipe._snapshot_state in (_SnapshotState.Iterating, _SnapshotState.NotStarted):\n datapipe._number_of_samples_yielded = 0\n datapipe._fast_forward_iterator = None\n reset_func(*args, **kwargs)\n datapipe._snapshot_state = _SnapshotState.Iterating\n namespace['reset'] = conditional_reset\n if '__iter__' in namespace:\n hook_iterator(namespace)\n return super().__new__(cls, name, bases, namespace, **kwargs)", + "docstring": "Metaclass for and inherits from . Add various functions for behaviors specific to .", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_typing.py", + "ast_data": "ClassDef name:_IterDataPipeMeta FunctionDef name:__new__ arg:cls arg:name arg:bases arg:namespace arguments arg arg arg arg arg If Compare Assign FunctionDef name:conditional_reset arguments arg arg Assign If Compare Assign Assign Call Assign Call Assign If Compare Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "to_structured_signature", + "source_code": "def to_structured_signature(function_type: FunctionType) -> Tuple[Any, Any]:\n\n def to_signature(x_type):\n if x_type is None:\n raise TypeError(f'Can not generate structured signature if FunctionType is not fully specified. Received {function_type}')\n return x_type.placeholder_value(trace_type.InternalPlaceholderContext(unnest_only=True))\n args_signature = []\n kwargs_signature = {}\n for p in function_type.parameters.values():\n if p.kind == Parameter.POSITIONAL_ONLY:\n args_signature.append(to_signature(p.type_constraint))\n else:\n kwargs_signature[p.name] = to_signature(p.type_constraint)\n input_signature = (tuple(args_signature), kwargs_signature)\n output_signature = to_signature(function_type.output)\n return (input_signature, output_signature)", + "docstring": "Returns structured input and output signatures from a FunctionType.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", + "ast_data": "FunctionDef name:to_structured_signature arg:function_type arguments arg FunctionDef name:to_signature arg:x_type arguments arg If Compare Raise Call Return return:yes Call Call Assign Assign For Call If Compare Call Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, img: Tensor, lafs: Tensor) -> Tensor:\n return get_laf_descriptors(img, lafs, self.descriptor, self.patch_size, self.grayscale_descriptor)", + "docstring": "Three stage local feature detection. First the location and scale of interest points are determined by detect function. Then affine shape and orientation. Args: img: image features with shape :math:. lafs: local affine frames :math:. 
Returns: Local descriptors of shape :math: where :math: is descriptor size.", + "type": "method", + "file_path": "kornia\\kornia\\feature\\integrated.py", + "ast_data": "FunctionDef name:forward arg:self arg:img arg:lafs arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "run", + "source_code": "def run(self, fn, args=(), kwargs=None, options=None):\n validate_run_function(fn)\n fn, args, kwargs = _maybe_partial_apply_variables(fn, args, kwargs)\n fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())\n options = options or distribute_lib.RunOptions()\n return self.extended.tpu_run(fn, args, kwargs, options)", + "docstring": "Run the computation defined by on each TPU replica. Executes ops specified by on each replica. If or have , such as those produced by a from or , when is executed on a particular replica, it will be executed with the component of that correspond to that replica. may call to access members such as . All arguments in or should either be nest of tensors or containing tensors or composite tensors. Example usage: >>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') >>> tf.config.experimental_connect_to_cluster(resolver) >>> tf.tpu.experimental.initialize_tpu_system(resolver) >>> strategy = tf.distribute.TPUStrategy(resolver) >>> @tf.function ... def run(): ... def value_fn(value_context): ... return value_context.num_replicas_in_sync ... distributed_values = ( ... strategy.experimental_distribute_values_from_function(value_fn)) ... def replica_fn(input): ... return input * 2 ... return strategy.run(replica_fn, args=(distributed_values,)) >>> result = run() Args: fn: The function to run. The output must be a of s. args: (Optional) Positional arguments to . kwargs: (Optional) Keyword arguments to . options: (Optional) An instance of specifying the options to run . Returns: Merged return value of across replicas. The structure of the return value is the same as the return value from . 
Each element in the structure can either be , objects, or s (for example, if running on a single replica).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:run arg:self arg:fn arg:args arg:kwargs arg:options arguments arg arg arg arg arg Call Assign Call Assign Call Call Assign BoolOp Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "from_str", + "source_code": "@classmethod\ndef from_str(cls, elementstr):\n ival, params = cls.parse(elementstr)\n return cls(ival, params)", + "docstring": "Construct an instance from a string of the form 'token;key=val'.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:from_str arg:cls arg:elementstr arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "strategy", + "source_code": "@property\ndef strategy(self):\n return self._strategy", + "docstring": "Returns the associated with the .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:strategy arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "add", + "source_code": "def add(a, b):\n return _maybe_static(a) + _maybe_static(b)", + "docstring": "A version of tf.add that eagerly evaluates if possible.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py", + "ast_data": "FunctionDef name:add arg:a arg:b arguments arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_nc_hypergeom_mean_inverse", + "source_code": "def _nc_hypergeom_mean_inverse(x, M, n, N):\n nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x)\n return nc", + "docstring": "For the given noncentral hypergeometric parameters x, M, n,and N (table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2 contingency table), find the noncentrality parameter of Fisher's noncentral hypergeometric distribution whose mean is x.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_odds_ratio.py", + "ast_data": "FunctionDef name:_nc_hypergeom_mean_inverse arg:x arg:M arg:n arg:N arguments arg arg arg arg Assign Call arguments arg Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, f: _T) -> _T:\n gradient_registry.register(f, self._op_type)\n return f", + "docstring": "Registers the function as gradient function for .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:f arguments arg arg Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_score_mod", + "source_code": "def get_score_mod(self, score_mod: Optional[_score_mod_signature]) -> _score_mod_signature:\n if score_mod is None:\n score_mod = _identity\n\n def new_score_mod(score: torch.Tensor, b: torch.Tensor, h: torch.Tensor, q_idx: torch.Tensor, physical_kv_idx: torch.Tensor):\n physical_kv_block = physical_kv_idx // self.page_size\n physical_kv_offset = physical_kv_idx % self.page_size\n logical_block_idx = self.physical_to_logical[b, physical_kv_block]\n logical_kv_idx = logical_block_idx * self.page_size + physical_kv_offset\n return torch.where(logical_block_idx >= 0, score_mod(score, b, h, q_idx, logical_kv_idx), float('-inf'))\n return new_score_mod", + "docstring": "Converts a 
score_mod based on mapping from the physical block index to the logical block index. Args: score_mod (_score_mod_signature): score_mod based on the logical block index.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\attention\\experimental\\_paged_attention.py", + "ast_data": "FunctionDef name:get_score_mod arg:self arg:score_mod arguments arg arg If Compare Assign FunctionDef name:new_score_mod arg:score arg:b arg:h arg:q_idx arg:physical_kv_idx arguments arg arg arg arg arg Assign Assign Assign Assign Return return:yes Call Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "reset", + "source_code": "def reset(self):\n self.head = None\n self.errors = set()\n self.node_index = {}\n self.leaves = set()\n self.active_stmts = set()\n self.owners = {}\n self.forward_edges = set()\n self.finally_sections = {}\n self.finally_section_subgraphs = {}\n self.finally_section_has_direct_flow = {}\n self.pending_finally_sections = set()\n self.exits = {}\n self.section_entry = {}\n self.continues = {}\n self.raises = {}\n self.cond_entry = {}\n self.cond_leaves = {}", + "docstring": "Resets the state of this factory.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:reset arg:self arguments arg Assign Assign Call Assign Assign Call Assign Call Assign Assign Call Assign Assign Assign Assign Call Assign Assign Assign Assign Assign Assign" + }, + { + "library": "numpy", + "name": "default_config_dict", + "source_code": "def default_config_dict(name=None, parent_name=None, local_path=None):\n import warnings\n warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of deprecated default_config_dict(%r,%r,%r)' % (name, parent_name, local_path, name, parent_name, local_path), stacklevel=2)\n c = Configuration(name, parent_name, local_path)\n return c.todict()", + "docstring": "Return a configuration dictionary for usage in configuration() function defined in file setup_.py.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:default_config_dict arg:name arg:parent_name arg:local_path arguments arg arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n check_is_fitted(self, 'estimator_')\n pos_label = self._curve_scorer._get_pos_label()\n y_score, _ = _get_response_values_binary(self.estimator_, X, self._get_response_method(), pos_label=pos_label)\n return _threshold_scores_to_class_labels(y_score, self.best_threshold_, self.classes_, pos_label)", + "docstring": "Predict the target of new samples. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The samples, as accepted by . 
Returns ------- class_labels : ndarray of shape (n_samples,) The predicted class.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_check_shape", + "source_code": "def _check_shape(argshape, size):\n scalar_shape = []\n bc = []\n for argdim, sizedim in zip_longest(argshape[::-1], size[::-1], fillvalue=1):\n if sizedim > argdim or argdim == sizedim == 1:\n scalar_shape.append(sizedim)\n bc.append(True)\n else:\n bc.append(False)\n return (tuple(scalar_shape[::-1]), tuple(bc[::-1]))", + "docstring": "This is a utility function used by in the class geninvgauss_gen. It compares the tuple argshape to the tuple size. Parameters ---------- argshape : tuple of integers Shape of the arguments. size : tuple of integers or integer Size argument of rvs(). Returns ------- The function returns two tuples, scalar_shape and bc. scalar_shape : tuple Shape to which the 1-d array of random variates returned by _rvs_scalar() is converted when it is copied into the output array of _rvs(). bc : tuple of booleans bc is an tuple the same length as size. bc[j] is True if the data associated with that index is generated in one call of _rvs_scalar().", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:_check_shape arg:argshape arg:size arguments arg arg Assign Assign For Call If BoolOp Compare Compare Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_AttrsFetchMapper", + "source_code": "class _AttrsFetchMapper(_FetchMapper):\n\n def __init__(self, fetches):\n values = _get_attrs_values(fetches)\n self._fetch_type = type(fetches)\n self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]\n self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)\n\n def unique_fetches(self):\n return self._unique_fetches\n\n def build_results(self, values):\n results = []\n for m, vi in zip(self._mappers, self._value_indices):\n results.append(m.build_results([values[j] for j in vi]))\n return self._fetch_type(*results)", + "docstring": "Fetch mapper for attrs decorated classes.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "ClassDef name:_AttrsFetchMapper FunctionDef name:__init__ arg:self arg:fetches arguments arg arg Assign Call Assign Call Assign Call Assign Call FunctionDef name:unique_fetches arg:self arguments arg Return return:yes FunctionDef name:build_results arg:self arg:values arguments arg arg Assign For Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "flatten_sharded_optim_state_dict", + "source_code": "@staticmethod\ndef flatten_sharded_optim_state_dict(sharded_optim_state_dict: dict[str, Any], model: torch.nn.Module, optim: torch.optim.Optimizer) -> dict[str, Any]:\n FullyShardedDataParallel._warn_legacy_optim_state_dict('flatten_sharded_optim_state_dict', 'optim_state_dict_to_load', stacklevel=2)\n return FullyShardedDataParallel._optim_state_dict_to_load_impl(optim_state_dict=sharded_optim_state_dict, model=model, optim_input=None, optim=optim, full_state_dict=False, is_named_optimizer=False)", + "docstring": "Flatten a sharded optimizer state-dict. The API is similar to :meth:. 
The only difference is that the input `sharded_optim_state_dictshard_full_optim_state_dictshard_full_optim_state_dict`.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:flatten_sharded_optim_state_dict arg:sharded_optim_state_dict arg:model arg:optim arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "FullStateDictConfig", + "source_code": "@dataclass\nclass FullStateDictConfig(StateDictConfig):\n rank0_only: bool = False", + "docstring": "`state_dict_typestatesync_module_states`)", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py", + "ast_data": "ClassDef name:FullStateDictConfig" + }, + { + "library": "tensorflow", + "name": "create_graph_debug_info_def", + "source_code": "def create_graph_debug_info_def(func_named_operations):\n builder = tf_stack.GraphDebugInfoBuilder()\n for func_name, op in func_named_operations:\n if op.traceback is None:\n continue\n builder.AccumulateStackTrace(func_name, op.name, _compute_useful_frames(op.traceback, 10))\n return builder.Build()", + "docstring": "Construct and returns a protocol buffer. Args: func_named_operations: An iterable of (func_name, op.Operation) tuples where the Operation instances have a _traceback members. The func_name should be the empty string for operations in the top-level Graph. Returns: GraphDebugInfo protocol buffer. Raises: TypeError: If the arguments are not of the correct proto buffer type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py", + "ast_data": "FunctionDef name:create_graph_debug_info_def arg:func_named_operations arguments arg Assign Call For If Compare Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_storage_engine", + "source_code": "def get_storage_engine(self, cursor, table_name):\n cursor.execute('\\n SELECT engine\\n FROM information_schema.tables\\n WHERE\\n table_name = %s AND\\n table_schema = DATABASE()\\n ', [table_name])\n result = cursor.fetchone()\n if not result:\n return self.connection.features._mysql_storage_engine\n return result[0]", + "docstring": "Retrieve the storage engine for a given table. 
Return the default storage engine if the table doesn't exist.", + "type": "method", + "file_path": "django\\django\\db\\backends\\mysql\\introspection.py", + "ast_data": "FunctionDef name:get_storage_engine arg:self arg:cursor arg:table_name arguments arg arg arg Call Assign Call If Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_shard_orig_param_state", + "source_code": "def _shard_orig_param_state(fsdp_param_info: FSDPParamInfo, fqn: str, optim_state: dict[str, Any]) -> dict[str, Any]:\n if not optim_state:\n return {}\n fsdp_state = fsdp_param_info.state\n flat_param = fsdp_param_info.handle.flat_param\n param_idx = fsdp_param_info.param_indices[fqn]\n shard_param_info = flat_param._shard_param_infos[param_idx]\n optim_state = _gather_state_dict(optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device)\n if not shard_param_info.in_shard:\n return {}\n new_optim_state: dict[str, Any] = {}\n intra_param_start_idx = shard_param_info.intra_param_start_idx\n intra_param_end_idx = shard_param_info.intra_param_end_idx\n for state_name, value in optim_state.items():\n if torch.is_tensor(value) and value.dim() > 0 and (fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD):\n value = value.flatten()[intra_param_start_idx:intra_param_end_idx + 1].clone()\n new_optim_state[state_name] = value\n return new_optim_state", + "docstring": "Shard the optimizer state for the original parameter with the name `` is True.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py", + "ast_data": "FunctionDef name:_shard_orig_param_state arg:fsdp_param_info arg:fqn arg:optim_state arguments arg arg arg If Return return:no Assign Assign Assign Assign Assign Call If Return return:no Assign Assign For Call If BoolOp Call Compare Call Compare Assign Call Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "is_regressor", + "source_code": "def is_regressor(estimator):\n if isinstance(estimator, type):\n warnings.warn(f'passing a class to {print(inspect.stack()[0][3])} is deprecated and will be removed in 1.8. Use an instance of the class instead.', FutureWarning)\n return getattr(estimator, '_estimator_type', None) == 'regressor'\n return get_tags(estimator).estimator_type == 'regressor'", + "docstring": "Return True if the given estimator is (probably) a regressor. Parameters ---------- estimator : estimator instance Estimator object to test. Returns ------- out : bool True if estimator is a regressor and False otherwise. Examples -------- >>> from sklearn.base import is_regressor >>> from sklearn.cluster import KMeans >>> from sklearn.svm import SVC, SVR >>> classifier = SVC() >>> regressor = SVR() >>> kmeans = KMeans() >>> is_regressor(classifier) False >>> is_regressor(regressor) True >>> is_regressor(kmeans) False", + "type": "function", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "FunctionDef name:is_regressor arg:estimator arguments arg If Call Call Call Call Return return:yes Compare Call Return return:yes Compare Call" + }, + { + "library": "kornia", + "name": "trans", + "source_code": "@classmethod\ndef trans(cls, x: Tensor, y: Tensor) -> Se2:\n KORNIA_CHECK(x.shape == y.shape)\n KORNIA_CHECK_SAME_DEVICES([x, y])\n batch_size = x.shape[0] if len(x.shape) > 0 else None\n rotation = So2.identity(batch_size, x.device, x.dtype)\n return cls(rotation, stack((x, y), -1))", + "docstring": "Construct a translation only Se2 instance. Args: x: the x-axis translation. 
y: the y-axis translation.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", + "ast_data": "FunctionDef name:trans arg:cls arg:x arg:y arguments arg arg arg Call Compare Call Assign Compare Call Assign Call Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "parse_json", + "source_code": "@staticmethod\ndef parse_json(obj):\n return ensure_dict(obj, 'JWE')", + "docstring": "Parse JWE JSON Serialization. :param obj: JWE JSON Serialization as str or dict :return: Parsed JWE JSON Serialization as dict if is an str, or as is if is already a dict", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7516\\jwe.py", + "ast_data": "FunctionDef name:parse_json arg:obj arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n check_is_fitted(self)\n X = self._check_inputs(X, in_fit=False, copy=self.copy)\n return self._transform(X, inverse=False)", + "docstring": "Feature-wise transformation of the data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. If a sparse matrix is provided, it will be converted into a sparse `ignore_implicit_zeros` is False. Returns ------- Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) The projected data.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "to_proto", + "source_code": "def to_proto(self, export_scope=None):\n if export_scope is None or self.name.startswith(export_scope):\n context_def = control_flow_pb2.WhileContextDef()\n context_def.context_name = ops.strip_name_scope(self.name, export_scope)\n context_def.parallel_iterations = self._parallel_iterations\n if self._maximum_iterations is not None:\n context_def.maximum_iterations_name = ops.strip_name_scope(self._maximum_iterations.name, export_scope)\n context_def.back_prop = self._back_prop\n context_def.swap_memory = self._swap_memory\n context_def.pivot_for_pred_name = ops.strip_name_scope(self._pivot_for_pred.name, export_scope)\n context_def.pivot_for_body_name = ops.strip_name_scope(self._pivot_for_body.name, export_scope)\n context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope)\n context_def.loop_exit_names.extend([ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits])\n context_def.loop_enter_names.extend([ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters])\n context_def.values_def.MergeFrom(super(WhileContext, self)._to_values_def(export_scope=export_scope))\n for nested in self._nested_contexts:\n nested_def = context_def.nested_contexts.add()\n nested.to_control_flow_context_def(nested_def)\n return context_def\n else:\n return None", + "docstring": "Converts a to a protocol buffer. Args: export_scope: Optional . Name scope to remove. 
Returns: A protocol buffer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg If BoolOp Compare Call Assign Call Assign Call Assign If Compare Assign Call Assign Assign Assign Call Assign Call Assign Call Call Call Call Call Call Call Call For Assign Call Call Return return:yes Return return:no" + }, + { + "library": "sphinx", + "name": "update", + "source_code": "def update(self, config: Config) -> None:\n for key in self.LATEX_ELEMENTS_KEYS:\n if config.latex_elements.get(key):\n value = config.latex_elements[key]\n setattr(self, key, value)\n for key in self.UPDATABLE_KEYS:\n if key in config.latex_theme_options:\n value = config.latex_theme_options[key]\n setattr(self, key, value)", + "docstring": "Override theme settings by user's configuration.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\latex\\theming.py", + "ast_data": "FunctionDef name:update arg:self arg:config arguments arg arg For If Call Assign Call For If Compare Assign Call" + }, + { + "library": "django", + "name": "__get__", + "source_code": "def __get__(self, instance, cls=None):\n if instance is None:\n return self\n pattern = instance._regex\n if isinstance(pattern, str):\n instance.__dict__['regex'] = self._compile(pattern)\n return instance.__dict__['regex']\n language_code = get_language()\n if language_code not in instance._regex_dict:\n instance._regex_dict[language_code] = self._compile(str(pattern))\n return instance._regex_dict[language_code]", + "docstring": "Return a compiled regular expression based on the active language.", + "type": "method", + "file_path": "django\\django\\urls\\resolvers.py", + "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Assign If Call Assign Call Return return:yes Assign Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "scrapy", + "name": "LZMAPlugin", + "source_code": "class LZMAPlugin:\n\n def __init__(self, file: BinaryIO, feed_options: dict[str, Any]) -> None:\n self.file = file\n self.feed_options = feed_options\n format = self.feed_options.get('lzma_format')\n check = self.feed_options.get('lzma_check', -1)\n preset = self.feed_options.get('lzma_preset')\n filters = self.feed_options.get('lzma_filters')\n self.lzmafile = LZMAFile(filename=self.file, mode='wb', format=format, check=check, preset=preset, filters=filters)\n\n def write(self, data: bytes) -> int:\n return self.lzmafile.write(data)\n\n def close(self) -> None:\n self.lzmafile.close()", + "docstring": "Compresses received data using _. 
Accepted `lzma_formatlzma_checklzma_presetlzma_filterslzma.LZMAFile` for more info about parameters.", + "type": "class", + "file_path": "scrapy\\scrapy\\extensions\\postprocessing.py", + "ast_data": "ClassDef name:LZMAPlugin FunctionDef name:__init__ arg:self arg:file arg:feed_options arguments arg arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call FunctionDef name:write arg:self arg:data arguments arg arg Return return:yes Call FunctionDef name:close arg:self arguments arg Call" + }, + { + "library": "seaborn", + "name": "_not_bottom_axes", + "source_code": "@property\ndef _not_bottom_axes(self):\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = i < self._ncol * (self._nrow - 1) and i < self._ncol * (self._nrow - 1) - n_empty\n if append:\n axes.append(ax)\n return np.array(axes, object).flat", + "docstring": "Return a flat array of axes that aren't on the bottom row.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:_not_bottom_axes arg:self arguments arg If Compare Return return:yes Assign Assign For Call Assign BoolOp Compare Compare If Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "sh_chebyt", + "source_code": "def sh_chebyt(n, monic=False):\n base = sh_jacobi(n, 0.0, 0.5, monic=monic)\n if monic:\n return base\n if n > 0:\n factor = 4 ** n / 2.0\n else:\n factor = 1.0\n base._scale(factor)\n return base", + "docstring": "Shifted Chebyshev polynomial of the first kind. Defined as :math: for :math: the nth Chebyshev polynomial of the first kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- T : orthopoly1d Shifted Chebyshev polynomial of the first kind. Notes ----- The polynomials :math: are orthogonal over :math: with weight function :math:.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:sh_chebyt arg:n arg:monic arguments arg arg Assign Call If Return return:yes If Compare Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_configure_session_config_for_std_servers", + "source_code": "def _configure_session_config_for_std_servers(strategy, eval_strategy, session_config, cluster_spec, task_type, task_id):\n if task_type == _TaskType.EVALUATOR:\n if eval_strategy:\n eval_strategy.configure(session_config=session_config)\n else:\n strategy = copy.deepcopy(strategy)\n strategy.configure(session_config=session_config, cluster_spec=cluster_spec, task_type=task_type, task_id=task_id)\n del session_config.device_filters[:]", + "docstring": "Call strategy's to mutate the session_config. The session_config is currently needed as default config for a TensorFlow server. 
In the future, we should be able to remove this method and only pass the session config to a client session.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py", + "ast_data": "FunctionDef name:_configure_session_config_for_std_servers arg:strategy arg:eval_strategy arg:session_config arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg arg arg arg If Compare If Call Assign Call Call" + }, + { + "library": "numpy", + "name": "cumulative_sum", + "source_code": "@array_function_dispatch(_cumulative_sum_dispatcher)\ndef cumulative_sum(x, /, *, axis=None, dtype=None, out=None, include_initial=False):\n return _cumulative_func(x, um.add, axis, dtype, out, include_initial)", + "docstring": "Return the cumulative sum of the elements along a given axis. This function is an Array API compatible alternative to . Parameters ---------- x : array_like Input array. axis : int, optional Axis along which the cumulative sum is computed. The default (None) is only allowed for one-dimensional arrays. For arrays with more than one dimension `ufuncs-output-typesum` >>> c = np.array([1, 2e-9, 3e-9] * 1000000) >>> np.cumulative_sum(c)[-1] 1000000.0050045159 >>> c.sum() 1000000.0050000029", + "type": "function", + "file_path": "numpy\\numpy\\_core\\fromnumeric.py", + "ast_data": "FunctionDef name:cumulative_sum arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "total_run_calls", + "source_code": "@property\n@deprecated(None, 'Track steps using a tf.Variable saved in checkpoint instead.')\n@doc_controls.do_not_generate_docs\ndef total_run_calls(self):\n if self._platform_device == failure_handling_util.PlatformDevice.INTERNAL_TPU:\n raise NotImplementedError('Please create variables saved in checkpoint to keep track of steps and epochs.')\n return self._run_counter", + "docstring": "Returns the number of times is called. DEPRECATED: user should track total steps themselves, as this API provides little expressivity gain but could easily be misused and incurs extra synchronization cost for TPUStrategy users. This value tracks the number of all calls to including those before the program is restarted and the training is restored, by saving and reading the value in the checkpoint. A user can compute their total number of iterations by , while should be one for users. They can also use this value to infer the starting epoch and step after training restores, as shown in the example above.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py", + "ast_data": "FunctionDef name:total_run_calls arg:self arguments arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_streaming_sparse_true_positive_at_k", + "source_code": "def _streaming_sparse_true_positive_at_k(labels, predictions_idx, k=None, class_id=None, weights=None, name=None):\n with ops.name_scope(name, _at_k_name('true_positive', k, class_id=class_id), (predictions_idx, labels, weights)) as scope:\n tp = _sparse_true_positive_at_k(predictions_idx=predictions_idx, labels=labels, class_id=class_id, weights=weights)\n batch_total_tp = math_ops.cast(math_ops.reduce_sum(tp), dtypes.float64)\n var = metric_variable([], dtypes.float64, name=scope)\n return (var, state_ops.assign_add(var, batch_total_tp, name='update'))", + "docstring": "Calculates weighted per step true positives for recall@k and precision@k. 
If is specified, calculate binary true positives for only. If is not specified, calculate metrics for predicted vs label classes, where is the 2nd dimension of . If is , weights default to 1. Use weights of 0 to mask values. Args: labels: or with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and has shape [batch_size, num_labels]. [D1, ... DN] must match . predictions_idx: 1-D or higher with last dimension , top predicted classes. For rank , the first dimensions must match . k: Integer, k for @k metric. This is only used for default op name. class_id: Class for which we want binary metrics. weights: whose rank is either 0, or n-1, where n is the rank of . If the latter, it must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). name: Name of new variable, and namespace for other dependent ops. Returns: A tuple of and update . Raises: ValueError: If is not and has an incompatible shape.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:_streaming_sparse_true_positive_at_k arg:labels arg:predictions_idx arg:k arg:class_id arg:weights arg:name arguments arg arg arg arg arg arg With Call Call Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_repr_png_", + "source_code": "def _repr_png_(self):\n if not self._isinit:\n self._init()\n pixels = self.lut\n if pixels.shape[0] < _BIVAR_REPR_PNG_SIZE:\n pixels = np.repeat(pixels, repeats=_BIVAR_REPR_PNG_SIZE // pixels.shape[0], axis=0)[:256, :]\n if pixels.shape[1] < _BIVAR_REPR_PNG_SIZE:\n pixels = np.repeat(pixels, repeats=_BIVAR_REPR_PNG_SIZE // pixels.shape[1], axis=1)[:, :256]\n pixels = (pixels[::-1, :, :] * 255).astype(np.uint8)\n png_bytes = io.BytesIO()\n title = self.name + ' BivarColormap'\n author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'\n pnginfo = PngInfo()\n pnginfo.add_text('Title', title)\n pnginfo.add_text('Description', title)\n pnginfo.add_text('Author', author)\n pnginfo.add_text('Software', author)\n Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)\n return png_bytes.getvalue()", + "docstring": "Generate a PNG representation of the BivarColormap.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:_repr_png_ arg:self arguments arg If Call Assign If Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Assign Assign Call Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_transform", + "source_code": "def set_transform(self, t):\n self._transform = t\n self._transformSet = True\n self.pchanged()\n self.stale = True", + "docstring": "Set the artist transform. Parameters ---------- t :", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:set_transform arg:self arg:t arguments arg arg Assign Assign Call Assign" + }, + { + "library": "django", + "name": "stringfilter", + "source_code": "def stringfilter(func):\n\n @wraps(func)\n def _dec(first, *args, **kwargs):\n first = str(first)\n result = func(first, *args, **kwargs)\n if isinstance(first, SafeData) and getattr(unwrap(func), 'is_safe', False):\n result = mark_safe(result)\n return result\n return _dec", + "docstring": "Decorator for filters which should only receive strings. 
The object passed as the first positional argument will be converted to a string.", + "type": "function", + "file_path": "django\\django\\template\\defaultfilters.py", + "ast_data": "FunctionDef name:stringfilter arg:func arguments arg FunctionDef name:_dec arg:first arguments arg arg arg Assign Call Assign Call If BoolOp Call Call Call Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_fill_between_process_units", + "source_code": "def _fill_between_process_units(self, ind_dir, dep_dir, ind, dep1, dep2, **kwargs):\n return map(np.ma.masked_invalid, self._process_unit_info([(ind_dir, ind), (dep_dir, dep1), (dep_dir, dep2)], kwargs))", + "docstring": "Handle united data, such as dates.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:_fill_between_process_units arg:self arg:ind_dir arg:dep_dir arg:ind arg:dep1 arg:dep2 arguments arg arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "_fix_polygon", + "source_code": "@classmethod\ndef _fix_polygon(cls, poly, clone=True):\n if clone:\n poly = poly.clone()\n if not poly.exterior_ring.is_counterclockwise:\n poly.exterior_ring = list(reversed(poly.exterior_ring))\n for i in range(1, len(poly)):\n if poly[i].is_counterclockwise:\n poly[i] = list(reversed(poly[i]))\n return poly", + "docstring": "Fix single polygon orientation as described in __init__().", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\adapter.py", + "ast_data": "FunctionDef name:_fix_polygon arg:cls arg:poly arg:clone arguments arg arg arg If Assign Call If Assign Call Call For Call Call If Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_cached_transform", + "source_code": "def _cached_transform(sub_pipeline, *, cache, param_name, param_value, transform_params):\n if param_name not in cache:\n if isinstance(param_value, tuple):\n cache[param_name] = tuple((sub_pipeline.transform(element, **transform_params) for element in param_value))\n else:\n cache[param_name] = sub_pipeline.transform(param_value, **transform_params)\n return cache[param_name]", + "docstring": "Transform a parameter value using a sub-pipeline and cache the result. Parameters ---------- sub_pipeline : Pipeline The sub-pipeline to be used for transformation. cache : dict The cache dictionary to store the transformed values. param_name : str The name of the parameter to be transformed. param_value : object The value of the parameter to be transformed. transform_params : dict The metadata to be used for transformation. This passed to the method of the sub-pipeline. Returns ------- transformed_value : object The transformed value of the parameter.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:_cached_transform arg:sub_pipeline arguments arg arg arg arg arg If Compare If Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "serialize_sparse", + "source_code": "@tf_export(v1=['io.serialize_sparse', 'serialize_sparse'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('serialize_sparse')\ndef serialize_sparse(sp_input, name=None, out_type=dtypes.string):\n return serialize_sparse_v2(sp_input, out_type, name)", + "docstring": "Serialize a into a 3-vector (1-D ) object. Args: sp_input: The input . name: A name prefix for the returned tensors (optional). 
out_type: The to use for serialization. Returns: A 3-vector (1-D ), with each column representing the serialized 's indices, values, and shape (respectively). Raises: TypeError: If is not a .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:serialize_sparse arg:sp_input arg:name arg:out_type arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "roots_hermitenorm", + "source_code": "def roots_hermitenorm(n, mu=False):\n m = int(n)\n if n < 1 or n != m:\n raise ValueError('n must be a positive integer.')\n mu0 = np.sqrt(2.0 * np.pi)\n if n <= 150:\n\n def an_func(k):\n return 0.0 * k\n\n def bn_func(k):\n return np.sqrt(k)\n f = _ufuncs.eval_hermitenorm\n\n def df(n, x):\n return n * _ufuncs.eval_hermitenorm(n - 1, x)\n return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)\n else:\n nodes, weights = _roots_hermite_asy(m)\n nodes *= sqrt(2)\n weights *= sqrt(2)\n if mu:\n return (nodes, weights, mu0)\n else:\n return (nodes, weights)", + "docstring": "Gauss-Hermite (statistician's) quadrature. Compute the sample points and weights for Gauss-Hermite quadrature. The sample points are the roots of the nth degree Hermite polynomial, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.15 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad numpy.polynomial.hermite_e.hermegauss Notes ----- For small n up to 150 a modified version of the Golub-Welsch algorithm is used. Nodes are computed from the eigenvalue problem and improved by one step of a Newton iteration. The weights are computed from the well-known analytical formula. For n larger than 150 an optimal asymptotic algorithm is used which computes nodes and weights in a numerical stable manner. The algorithm has linear runtime making computation for very large n (several thousand or more) feasible. References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. 
New York: Dover, 1972.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:roots_hermitenorm arg:n arg:mu arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Assign Call If Compare FunctionDef name:an_func arg:k arguments arg Return return:yes FunctionDef name:bn_func arg:k arguments arg Return return:yes Call Assign FunctionDef name:df arg:n arg:x arguments arg arg Return return:yes Call Return return:yes Call Assign Call Call Call If Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "FortranRoutine", + "source_code": "class FortranRoutine:\n type = 'generic'\n\n def __init__(self, name=None, filename=None):\n self.filename = filename\n if name is None:\n root, ext = os.path.splitext(filename)\n name = root\n self.name = name\n self._dependencies = None\n\n def dependencies(self):\n if self._dependencies is None:\n deps = fortran.getDependencies(self.filename)\n self._dependencies = [d.lower() for d in deps]\n return self._dependencies\n\n def __repr__(self):\n return f'FortranRoutine({self.name!r}, filename={self.filename!r})'", + "docstring": "Wrapper for a Fortran routine in a file.", + "type": "class", + "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py", + "ast_data": "ClassDef name:FortranRoutine Assign FunctionDef name:__init__ arg:self arg:name arg:filename arguments arg arg arg Assign If Compare Assign Call Assign Assign Assign FunctionDef name:dependencies arg:self arguments arg If Compare Assign Call Assign Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "_get_window_indexer", + "source_code": "def _get_window_indexer(self) -> BaseIndexer:\n if isinstance(self.window, BaseIndexer):\n return self.window\n if self._win_freq_i8 is not None:\n return VariableWindowIndexer(index_array=self._index_array, window_size=self._win_freq_i8, center=self.center)\n return FixedWindowIndexer(window_size=self.window)", + "docstring": "Return an indexer class that will compute the window start and end bounds", + "type": "method", + "file_path": "pandas\\pandas\\core\\window\\rolling.py", + "ast_data": "FunctionDef name:_get_window_indexer arg:self arguments arg If Call Return return:yes If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_non_decade_format", + "source_code": "def _non_decade_format(self, sign_string, base, fx, usetex):\n return '$\\\\mathdefault{%s%s^{%.2f}}$' % (sign_string, base, fx)", + "docstring": "Return string for non-decade locations.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:_non_decade_format arg:self arg:sign_string arg:base arg:fx arg:usetex arguments arg arg arg arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "op_list", + "source_code": "def op_list(**configs):\n generated_configs = []\n if 'attrs' not in configs:\n raise ValueError('Missing attrs in configs')\n for inputs in configs['attrs']:\n tmp_result = {configs['attr_names'][i]: input_value for i, input_value in enumerate(inputs)}\n generated_configs.append(tmp_result)\n return generated_configs", + "docstring": "Generate a list of ops organized in a specific format. It takes two parameters which are \"attr_names\" and \"attr\". attrs stores the name and function of operators. Args: configs: key-value pairs including the name and function of operators. 
attrs and attr_names must be present in configs. Return: a sequence of dictionaries which stores the name and function of ops in a specifal format Example: attrs = [ [\"abs\", torch.abs], [\"abs_\", torch.abs_], ] attr_names = [\"op_name\", \"op\"]. With those two examples, we will generate (({\"op_name\": \"abs\"}, {\"op\" : torch.abs}), ({\"op_name\": \"abs_\"}, {\"op\" : torch.abs_}))", + "type": "function", + "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py", + "ast_data": "FunctionDef name:op_list arguments arg Assign If Compare Raise Call For Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_mean_tweedie_deviance", + "source_code": "def _mean_tweedie_deviance(y_true, y_pred, sample_weight, power):\n xp, _, device_ = get_namespace_and_device(y_true, y_pred)\n p = power\n if p < 0:\n dev = 2 * (xp.pow(xp.where(y_true > 0, y_true, 0.0), 2 - p) / ((1 - p) * (2 - p)) - y_true * xp.pow(y_pred, 1 - p) / (1 - p) + xp.pow(y_pred, 2 - p) / (2 - p))\n elif p == 0:\n dev = (y_true - y_pred) ** 2\n elif p == 1:\n dev = 2 * (xlogy(y_true, y_true / y_pred) - y_true + y_pred)\n elif p == 2:\n dev = 2 * (xp.log(y_pred / y_true) + y_true / y_pred - 1)\n else:\n dev = 2 * (xp.pow(y_true, 2 - p) / ((1 - p) * (2 - p)) - y_true * xp.pow(y_pred, 1 - p) / (1 - p) + xp.pow(y_pred, 2 - p) / (2 - p))\n return float(_average(dev, weights=sample_weight))", + "docstring": "Mean Tweedie deviance regression loss.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\_regression.py", + "ast_data": "FunctionDef name:_mean_tweedie_deviance arg:y_true arg:y_pred arg:sample_weight arg:power arguments arg arg arg arg Assign Call Assign If Compare Assign Call Call Compare Call Call If Compare Assign If Compare Assign Call If Compare Assign Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "is_leaf_module", + "source_code": "@compatibility(is_backward_compatible=True)\ndef is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:\n return (m.__module__.startswith('torch.nn') or m.__module__.startswith('torch.ao.nn')) and (not isinstance(m, torch.nn.Sequential))", + "docstring": "A method to specify whether a given `` here.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", + "ast_data": "FunctionDef name:is_leaf_module arg:self arg:m arg:module_qualified_name arguments arg arg arg Return return:yes BoolOp BoolOp Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_in_functional_construction_mode", + "source_code": "def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list):\n return any((isinstance(tensor, keras_tensor.KerasTensor) for tensor in nest.flatten([inputs, args, kwargs])))", + "docstring": "Check the arguments to see if we are constructing a functional model.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:_in_functional_construction_mode arg:layer arg:inputs arg:args arg:kwargs arg:input_list arguments arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "seaborn", + "name": "refline", + "source_code": "def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n return self", + "docstring": 
"Add a reference line(s) to each facet. Parameters ---------- x, y : numeric Value(s) to draw the line(s) at. color : :mod: Specifies the color of the reference line(s). Pass `matplotlib.axes.Axes.axvlinematplotlib.axes.Axes.axhlineFacetGrid` for easy method chaining.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:refline arg:self arguments arg arg arg arg arg arg Assign Assign If Compare Call If Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_build_shuffle_gather", + "source_code": "def _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op=None):\n num_source_devices = len(input_tensors)\n num_gather_devices = len(gather_devices)\n shape = input_tensors[0].shape\n if len(shape) != 1:\n raise ValueError('input_tensors must be 1D')\n shards_by_source = []\n for d in range(0, num_source_devices):\n with ops.colocate_with(input_tensors[d]):\n shards_by_source.append(_ragged_split(input_tensors[d], num_gather_devices))\n reduced_shards = []\n for d in range(0, num_gather_devices):\n with ops.device(gather_devices[d]):\n values = [s[d] for s in shards_by_source]\n red_shard = red_op(values)\n if un_op:\n red_shard = un_op(red_shard)\n reduced_shards.append(red_shard)\n return reduced_shards", + "docstring": "Construct the gather (concentrate and reduce) phase of shuffle all-reduce. Args: input_tensors: list of values to be reduced. gather_devices: list of names of devices on which reduction shards should be placed. red_op: the binary reduction Op un_op: optional elementwise unary Op to be applied to fully-reduced values. Returns: list of which are the fully reduced shards. Raises: ValueError: inputs not well-formed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:_build_shuffle_gather arg:input_tensors arg:gather_devices arg:red_op arg:un_op arguments arg arg arg arg Assign Call Assign Call Assign If Compare Call Raise Call Assign For Call With Call Call Call Assign For Call With Call Assign Assign Call If Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "canonicalize", + "source_code": "def canonicalize(self):\n sizevars = V.graph.sizevars\n sizes = self.get_size()\n strides = self.get_stride()\n strides = [sizevars.size_hint(x) for x in strides]\n index_vars = [sympy_index_symbol(f'd{i}') for i in range(len(sizes))]\n index_order = sorted(range(len(strides)), key=strides.__getitem__, reverse=True)\n lookup = {pos: idx for idx, pos in enumerate(index_order)}\n order = [lookup[i] for i in range(len(lookup))]\n index_vars = [index_vars[i] for i in order]\n indexer = self.make_indexer()\n index = indexer(index_vars)\n new_sizes, reindex, _prune = V.graph.sizevars._simplify_loops(index_vars, sizes, [index])\n _, add_var = var_builder('c')\n replacement = dict(zip(index_vars, reindex([add_var(x) for x in new_sizes])))\n index = sympy_subs(sympy.expand(index), replacement)\n return (index, tuple(new_sizes))", + "docstring": "Manually get canonicalization of the output index", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:canonicalize arg:self arguments arg Assign Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call Assign Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + 
"name": "fit_intercept_only", + "source_code": "def fit_intercept_only(self, y_true, sample_weight=None):\n if sample_weight is None:\n median = np.percentile(y_true, 50, axis=0)\n else:\n median = _weighted_percentile(y_true, sample_weight, 50)\n diff = y_true - median\n term = np.sign(diff) * np.minimum(self.closs.delta, np.abs(diff))\n return median + np.average(term, weights=sample_weight)", + "docstring": "Compute raw_prediction of an intercept-only model. This is the weighted median of the target, i.e. over the samples axis=0.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "FunctionDef name:fit_intercept_only arg:self arg:y_true arg:sample_weight arguments arg arg arg If Compare Assign Call Assign Call Assign Assign Call Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "drop_add_residual_stochastic_depth_list", + "source_code": "def drop_add_residual_stochastic_depth_list(x_list: List[Tensor], residual_func: Callable[[Tensor, Any], Tensor], sample_drop_ratio: float=0.0, scaling_vector=None) -> Tensor:\n branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]\n branges = [s[0] for s in branges_scales]\n residual_scale_factors = [s[1] for s in branges_scales]\n attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)\n residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias))\n outputs = []\n for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):\n outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))\n return outputs", + "docstring": "Add residual connections to a list of tensors.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\block.py", + "ast_data": "FunctionDef name:drop_add_residual_stochastic_depth_list arg:x_list arg:residual_func arg:sample_drop_ratio arg:scaling_vector arguments arg arg arg arg Assign Call Assign Assign Assign Call Assign Call Call Assign For Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "antiderivative", + "source_code": "def antiderivative(self, nu):\n p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)\n for axis, n in enumerate(nu):\n p._antiderivative_inplace(n, axis)\n p._ensure_c_contiguous()\n return p", + "docstring": "Construct a new piecewise polynomial representing the antiderivative. Antiderivative is also the indefinite integral of the function, and derivative is its inverse operation. Parameters ---------- nu : ndim-tuple of int Order of derivatives to evaluate for each dimension. If negative, the derivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k + n representing the antiderivative of this polynomial. 
Notes ----- The antiderivative returned by this function is continuous and continuously differentiable to order n-1, up to floating point rounding error.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", + "ast_data": "FunctionDef name:antiderivative arg:self arg:nu arguments arg arg Assign Call Call For Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "assert_near_v2", + "source_code": "@tf_export('debugging.assert_near', v1=[])\n@dispatch.register_binary_elementwise_assert_api\n@dispatch.add_dispatch_support\ndef assert_near_v2(x, y, rtol=None, atol=None, message=None, summarize=None, name=None):\n return assert_near(x=x, y=y, rtol=rtol, atol=atol, summarize=summarize, message=message, name=name)", + "docstring": "Assert the condition and are close element-wise. This Op checks that holds for every pair of (possibly broadcast) elements of and . If both and are empty, this is trivially satisfied. If any elements of and are not close, , as well as the first entries of and are printed, and is raised. The default and is , where is the smallest representable positive number such that . This is about in , in , and in . See . Args: x: Float or complex . y: Float or complex , same dtype as and broadcastable to . rtol: . Same as, and broadcastable to, . The relative tolerance. Default is . atol: . Same as, and broadcastable to, . The absolute tolerance. Default is . message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to \"assert_near\". Returns: Op that raises if and are not close enough. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and is False for any pair of elements in and . The check can be performed immediately during eager execution or if and are statically known. @compatibility(numpy) Similar to , except tolerance depends on data type. This is due to the fact that is often used with , , and even data. @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:assert_near_v2 arg:x arg:y arg:rtol arg:atol arg:message arg:summarize arg:name arguments arg arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_resolve_prefix", + "source_code": "def _resolve_prefix(self, token):\n if token in self._handlers:\n return token\n elif token in self._alias_to_prefix:\n return self._alias_to_prefix[token]\n else:\n return None", + "docstring": "Resolve command prefix from the prefix itself or its alias. Args: token: a str to be resolved. Returns: If resolvable, the resolved command prefix. If not resolvable, None.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:_resolve_prefix arg:self arg:token arguments arg arg If Compare Return return:yes If Compare Return return:yes Return return:no" + }, + { + "library": "scikit-learn", + "name": "predict_log_proba", + "source_code": "def predict_log_proba(self, X):\n y_prob = self.predict_proba(X)\n return np.log(y_prob, out=y_prob)", + "docstring": "Return the log of probability estimates. Parameters ---------- X : ndarray of shape (n_samples, n_features) The input data. 
Returns ------- log_y_prob : ndarray of shape (n_samples, n_classes) The predicted log-probability of the sample for each class in the model, where classes are ordered as they are in . Equivalent to .", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py", + "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_find_process_group", + "source_code": "def _find_process_group(self):\n for period in reversed(self._periods):\n if self.step % period == 0:\n return self.period_process_group_dict[period]\n return None", + "docstring": "Return a process group as the value of an `` if not found.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\hierarchical_model_averager.py", + "ast_data": "FunctionDef name:_find_process_group arg:self arguments arg For Call If Compare Return return:yes Return return:no" + }, + { + "library": "scikit-learn", + "name": "_maybe_mark", + "source_code": "def _maybe_mark(estimator, check, expected_failed_checks: dict[str, str] | None=None, mark: Literal['xfail', 'skip', None]=None, pytest=None):\n should_be_marked, reason = _should_be_skipped_or_marked(estimator, check, expected_failed_checks)\n if not should_be_marked or mark is None:\n return (estimator, check)\n estimator_name = estimator.__class__.__name__\n if mark == 'xfail':\n return pytest.param(estimator, check, marks=pytest.mark.xfail(reason=reason))\n else:\n\n @wraps(check)\n def wrapped(*args, **kwargs):\n raise SkipTest(f'Skipping {_check_name(check)} for {estimator_name}: {reason}')\n return (estimator, wrapped)", + "docstring": "Mark the test as xfail or skip if needed. Parameters ---------- estimator : estimator object Estimator instance for which to generate checks. check : partial or callable Check to be marked. expected_failed_checks : dict[str, str], default=None Dictionary of the form {check_name: reason} for checks that are expected to fail. mark : \"xfail\" or \"skip\" or None Whether to mark the check as xfail or skip. pytest : pytest module, default=None Pytest module to use to mark the check. This is only needed if `\"xfail\"check_estimatorpytestparametrize_with_checks` only.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py", + "ast_data": "FunctionDef name:_maybe_mark arg:estimator arg:check arg:expected_failed_checks arg:mark arg:pytest arguments arg arg arg arg arg Assign Call If BoolOp Compare Return return:yes Assign If Compare Return return:yes Call Call FunctionDef name:wrapped arguments arg arg Raise Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "build_pyramid", + "source_code": "def build_pyramid(input: Tensor, max_level: int, border_type: str='reflect', align_corners: bool=False) -> list[Tensor]:\n KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n KORNIA_CHECK(isinstance(max_level, int) or max_level < 0, f'Invalid max_level, it must be a positive integer. Got: {max_level}')\n pyramid: list[Tensor] = []\n pyramid.append(input)\n for _ in range(max_level - 1):\n img_curr: Tensor = pyramid[-1]\n img_down: Tensor = pyrdown(img_curr, border_type, align_corners)\n pyramid.append(img_down)\n return pyramid", + "docstring": "Construct the Gaussian pyramid for a tensor image. .. 
image:: _static/img/build_pyramid.png The function constructs a vector of images and builds the Gaussian pyramid by recursively applying pyrDown to the previously built pyramid layers. Args: input : the tensor to be used to construct the pyramid. max_level: 0-based index of the last (the smallest) pyramid layer. It must be non-negative. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)[(B, C, H, W), (B, C, H/2, W/2), ...]`", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py", + "ast_data": "FunctionDef name:build_pyramid arg:input arg:max_level arg:border_type arg:align_corners arguments arg arg arg arg Call Call BoolOp Call Compare Call For Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_insert_copy_of_subgraph_a_after_input_node_c", + "source_code": "def _insert_copy_of_subgraph_a_after_input_node_c(input_node_c: Union[Node, list[Node]], input_node_c_2: Optional[Union[Node, list[Node]]], subgraph_a: NSSubgraph, gm_a: GraphModule, gm_b: GraphModule, node_name_prefix: str) -> Node:\n assert isinstance(input_node_c, (Node, list))\n nodes_of_a = [subgraph_a.end_node]\n cur_node = subgraph_a.end_node\n while cur_node != subgraph_a.start_node:\n cur_node = get_normalized_nth_input(cur_node, gm_a, 0)\n nodes_of_a.insert(0, cur_node)\n cur_node_a = nodes_of_a[0]\n cur_node_c = _insert_copy_of_node_a_after_input_node_c(input_node_c, input_node_c_2, cur_node_a, gm_a, gm_b, node_name_prefix)\n for cur_idx_a in range(1, len(nodes_of_a)):\n cur_node_a = nodes_of_a[cur_idx_a]\n prev_node_c = cur_node_c\n cur_node_c = _insert_copy_of_node_a_after_input_node_c(prev_node_c, None, cur_node_a, gm_a, gm_b, node_name_prefix)\n return cur_node_c", + "docstring": "TODO(before land): real docblock", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\fx\\graph_passes.py", + "ast_data": "FunctionDef name:_insert_copy_of_subgraph_a_after_input_node_c arg:input_node_c arg:input_node_c_2 arg:subgraph_a arg:gm_a arg:gm_b arg:node_name_prefix arguments arg arg arg arg arg arg Call Assign Assign While Compare Assign Call Call Assign Assign Call For Call Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "warp_affine3d", + "source_code": "def warp_affine3d(src: Tensor, M: Tensor, dsize: tuple[int, int, int], flags: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> Tensor:\n if len(src.shape) != 5:\n raise AssertionError(src.shape)\n if not (len(M.shape) == 3 and M.shape[-2:] == (3, 4)):\n raise AssertionError(M.shape)\n if len(dsize) != 3:\n raise AssertionError(dsize)\n B, C, D, H, W = src.size()\n size_src: tuple[int, int, int] = (D, H, W)\n size_out: tuple[int, int, int] = dsize\n M_4x4 = convert_affinematrix_to_homography3d(M)\n dst_norm_trans_src_norm: Tensor = normalize_homography3d(M_4x4, size_src, size_out)\n src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm)\n P_norm: Tensor = src_norm_trans_dst_norm[:, :3]\n dsize_out: list[int] = [B, C, *list(size_out)]\n grid = F.affine_grid(P_norm, dsize_out, align_corners=align_corners)\n return F.grid_sample(src, grid, align_corners=align_corners, mode=flags, padding_mode=padding_mode)", + "docstring": "Apply a projective transformation a to 3d tensor. .. warning:: This API signature it is experimental and might suffer some changes in the future. Args: src : input tensor of shape :math:. M: projective transformation matrix of shape :math:. 
dsize: size of the output image (depth, height, width). flags: interpolation mode to calculate output values `(B, C, D, H, W)get_perspective_transform3d`.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py", + "ast_data": "FunctionDef name:warp_affine3d arg:src arg:M arg:dsize arg:flags arg:padding_mode arg:align_corners arguments arg arg arg arg arg arg If Compare Call Raise Call If BoolOp Compare Call Compare Raise Call If Compare Call Raise Call Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "restart", + "source_code": "def restart(self):\n self.execv = True\n self.exit()", + "docstring": "Restart the process (may close connections). This method does not restart the process from the calling thread; instead, it stops the bus and asks the main thread to call execv.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\wspbus.py", + "ast_data": "FunctionDef name:restart arg:self arguments arg Assign Call" + }, + { + "library": "authlib", + "name": "query_client", + "source_code": "def query_client(self, client_id):\n raise NotImplementedError()", + "docstring": "Query OAuth client by client_id. The client model class MUST implement the methods described by :class:.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", + "ast_data": "FunctionDef name:query_client arg:self arg:client_id arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "statically_known_leq", + "source_code": "def statically_known_leq(self, left: Expr, right: Union[Expr, int]) -> bool:\n expr = left <= right\n return self.is_expr_static_and_true(expr)", + "docstring": "Returns a bool indicating if it is sound to optimize as if left is less than or equal to right.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\sizevars.py", + "ast_data": "FunctionDef name:statically_known_leq arg:self arg:left arg:right arguments arg arg arg Assign Compare Return return:yes Call" + }, + { + "library": "kornia", + "name": "eye_like", + "source_code": "def eye_like(n: int, input: Tensor, shared_memory: bool=False) -> Tensor:\n if n <= 0:\n raise AssertionError(type(n), n)\n if len(input.shape) < 1:\n raise AssertionError(input.shape)\n identity = eye(n, device=input.device).type(input.dtype)\n return identity[None].expand(input.shape[0], n, n) if shared_memory else identity[None].repeat(input.shape[0], 1, 1)", + "docstring": "Return a 2-D tensor with ones on the diagonal and zeros elsewhere with the same batch size as the input. Args: n: the number of rows :math:. input: image tensor that will determine the batch size of the output matrix. The expected shape is :math:. shared_memory: when set, all samples in the batch will share the same memory. Returns: The identity matrix with the same batch size as the input :math:. Notes: When the dimension to expand is of size 1, using torch.expand(...) yields the same tensor as torch.repeat(...) without using extra memory. 
Thus, when the tensor obtained by this method will be later assigned - use this method with shared_memory=False, otherwise, prefer using it with shared_memory=True.", + "type": "function", + "file_path": "kornia\\kornia\\utils\\misc.py", + "ast_data": "FunctionDef name:eye_like arg:n arg:input arg:shared_memory arguments arg arg arg If Compare Raise Call Call If Compare Call Raise Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "solve_toeplitz", + "source_code": "def solve_toeplitz(c_or_cr, b, check_finite=True):\n c, r = c_or_cr if isinstance(c_or_cr, tuple) else (c_or_cr, np.conjugate(c_or_cr))\n return _solve_toeplitz(c, r, b, check_finite)", + "docstring": "Solve the equation `c_or_crsolve_toeplitzbb`. >>> T = toeplitz(c, r) >>> T.dot(x) array([ 1., 2., 2., 5.])", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_basic.py", + "ast_data": "FunctionDef name:solve_toeplitz arg:c_or_cr arg:b arg:check_finite arguments arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "content", + "source_code": "@content.setter\ndef content(self, value):\n HttpResponse.content.fset(self, value)\n self._is_rendered = True", + "docstring": "Set the content for the response.", + "type": "method", + "file_path": "django\\django\\template\\response.py", + "ast_data": "FunctionDef name:content arg:self arg:value arguments arg arg Call Assign" + }, + { + "library": "tensorflow", + "name": "record_operation", + "source_code": "def record_operation(op_type, output_tensors, input_tensors, backward_function, forward_function=None):\n pywrap_tfe.TFE_Py_TapeSetRecordOperation(op_type, output_tensors, input_tensors, backward_function, forward_function)", + "docstring": "Records the operation on all tapes in the stack.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py", + "ast_data": "FunctionDef name:record_operation arg:op_type arg:output_tensors arg:input_tensors arg:backward_function arg:forward_function arguments arg arg arg arg arg Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, head_length=0.5, head_width=0.5, tail_width=0.2):\n self.head_length, self.head_width, self.tail_width = (head_length, head_width, tail_width)\n super().__init__()", + "docstring": "Parameters ---------- head_length : float, default: 0.5 Length of the arrow head. head_width : float, default: 0.5 Width of the arrow head. 
tail_width : float, default: 0.2 Width of the arrow tail.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:head_length arg:head_width arg:tail_width arguments arg arg arg arg Assign Call Call" + }, + { + "library": "sphinx", + "name": "create", + "source_code": "def create(self, name: str) -> Theme:\n if name in self._entry_point_themes:\n entry_point_loader = self._entry_point_themes[name]\n entry_point_loader()\n if name not in self._themes:\n raise ThemeError(__('no theme named %r found (missing theme.toml?)') % name)\n themes, theme_dirs, tmp_dirs = _load_theme_with_ancestors(name, self._themes, self._entry_point_themes)\n return Theme(name, configs=themes, paths=theme_dirs, tmp_dirs=tmp_dirs)", + "docstring": "Create an instance of theme.", + "type": "method", + "file_path": "sphinx\\sphinx\\theming.py", + "ast_data": "FunctionDef name:create arg:self arg:name arguments arg arg If Compare Assign Call If Compare Raise Call Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_swap_modules", + "source_code": "def _swap_modules(ep: ExportedProgram, modules_to_swap: dict[str, torch.nn.Module]) -> torch.fx.GraphModule:\n module_call_graph = {entry.fqn: entry.signature for entry in ep.module_call_graph if entry.signature}\n gm = ep.module()\n gm.validate_inputs = False\n gm.graph.eliminate_dead_code()\n assert isinstance(gm, torch.fx.GraphModule)\n _fix_input_output_signature(gm, ep.module_call_graph[0].signature)\n gm.module_call_graph = ep.module_call_graph\n gm.train = types.MethodType(type(gm).train, gm)\n gm.eval = types.MethodType(type(gm).eval, gm)\n assert isinstance(gm, torch.fx.GraphModule)\n gm = _swap_module_helper(gm, modules_to_swap, module_call_graph)\n return gm", + "docstring": "Unlifts the given ExportedProgram into a fx.GraphModule, and then swaps previously traced modules with new eager modules specified. Returns a fx.GraphModule with a custom forward function. Args: ep (ExportedProgram): Exported program to modify modules_to_swap (Dict[str, torch.nn.Module]): Mapping from module fqn to eager module to swap with. The specified module fqn should have also been specified in the argument to torch.export so that we know how to restore the calling convention to this argument. run_with_interpreter: Whether or not to run the graph using fx.Interpreter. Setting to true will help result in better error messages and easier debugging, but it has found to result in a QPS drop.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_swap.py", + "ast_data": "FunctionDef name:_swap_modules arg:ep arg:modules_to_swap arguments arg arg Assign Assign Call Assign Call Call Call Assign Assign Call Call Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_serialize", + "source_code": "def _serialize(self):\n result = list()\n for route in self._routes:\n result.append({'caller': route.caller, 'callee': route.callee})\n return result", + "docstring": "Serialize the object. 
Returns ------- obj : list A serialized version of the instance in the form of a list.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", + "ast_data": "FunctionDef name:_serialize arg:self arguments arg Assign Call For Call Return return:yes" + }, + { + "library": "scipy", + "name": "f", + "source_code": "@property\ndef f(self) -> np.ndarray:\n if self.fft_mode in {'onesided', 'onesided2X'}:\n return fft_lib.rfftfreq(self.mfft, self.T)\n elif self.fft_mode == 'twosided':\n return fft_lib.fftfreq(self.mfft, self.T)\n elif self.fft_mode == 'centered':\n return fft_lib.fftshift(fft_lib.fftfreq(self.mfft, self.T))\n fft_modes = get_args(FFT_MODE_TYPE)\n raise RuntimeError(f'self.fft_mode={self.fft_mode!r} not in {fft_modes}!')", + "docstring": "Frequencies values of the STFT. A 1d array of length with spaced entries is returned. See Also -------- delta_f: Width of the frequency bins of the STFT. f_pts: Number of points along the frequency axis. mfft: Length of the input for FFT used. ShortTimeFFT: Class this property belongs to.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", + "ast_data": "FunctionDef name:f arg:self arguments arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Call Assign Call Raise Call" + }, + { + "library": "pytorch", + "name": "_extract_fake_inputs", + "source_code": "def _extract_fake_inputs(gm, args, kwargs):\n fake_inps: list[Any] = []\n fake_vals: list[Any] = []\n for node in gm.graph.nodes:\n if node.op == 'placeholder':\n fake_inps.append(node.meta.get('val'))\n else:\n fake_vals.append(node.meta.get('example_value'))\n detected_fake_mode = detect_fake_mode(fake_inps + fake_vals)\n detected_shape_env = detect_shape_env(fake_inps + fake_vals)\n if detected_fake_mode:\n if detected_shape_env:\n assert detected_shape_env is detected_fake_mode.shape_env, \"Detected shape env does not match fake mode's shape env\"\n fake_mode = detected_fake_mode\n elif detected_shape_env:\n fake_mode = FakeTensorMode(shape_env=detected_shape_env, export=True)\n else:\n fake_mode = FakeTensorMode(shape_env=ShapeEnv(), export=True)\n count = 0\n\n def lookup_fake(x):\n nonlocal count\n val = fake_inps[count] if isinstance(x, (int, torch.Tensor)) else x\n count += 1\n return val\n fake_args = pytree.tree_map(lookup_fake, args)\n fake_kwargs = pytree.tree_map(lookup_fake, kwargs)\n return (fake_args, fake_kwargs, fake_mode)", + "docstring": "Given a graph module, extract fakified input tensors from the metadata of its placeholders, and map them to the structure of given args and kwargs. 
Also return the fake mode used to fakify those inputs.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_extract_fake_inputs arg:gm arg:args arg:kwargs arguments arg arg arg For If Compare Call Call Call Call Assign Call Assign Call If If Compare Assign If Assign Call Assign Call Call Assign FunctionDef name:lookup_fake arg:x arguments arg Assign Call Return return:yes Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "correct_covariance", + "source_code": "def correct_covariance(self, data):\n n_samples = len(self.dist_)\n n_support = np.sum(self.support_)\n if n_support < n_samples and np.allclose(self.raw_covariance_, 0):\n raise ValueError('The covariance matrix of the support data is equal to 0, try to increase support_fraction')\n correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)\n covariance_corrected = self.raw_covariance_ * correction\n self.dist_ /= correction\n return covariance_corrected", + "docstring": "Apply a correction to raw Minimum Covariance Determinant estimates. Correction using the empirical correction factor suggested by Rousseeuw and Van Driessen in [RVD]_. Parameters ---------- data : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- covariance_corrected : ndarray of shape (n_features, n_features) Corrected robust covariance estimate. References ---------- .. [RVD] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS", + "type": "method", + "file_path": "scikit-learn\\sklearn\\covariance\\_robust_covariance.py", + "ast_data": "FunctionDef name:correct_covariance arg:self arg:data arguments arg arg Assign Call Assign Call If BoolOp Compare Call Raise Call Assign Call Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "numel", + "source_code": "def numel(self):\n shape = self.shape\n\n def _prod(xs):\n return functools.reduce(operator.mul, xs, 1)\n return _prod(shape)", + "docstring": "Returns the number of elements (not accounting for sparsity) in the mask.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py", + "ast_data": "FunctionDef name:numel arg:self arguments arg Assign FunctionDef name:_prod arg:xs arguments arg Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "to_ss", + "source_code": "def to_ss(self):\n return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain), **self._dt_dict)", + "docstring": "Convert system representation to `StateSpace`. Returns ------- sys : instance of `StateSpace` State space model of the current system", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:to_ss arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "to_local", + "source_code": "def to_local(self, *, grad_placements: Optional[Sequence[Placement]]=None) -> torch.Tensor:\n if not torch.is_grad_enabled():\n return self._local_tensor\n if grad_placements is not None and (not isinstance(grad_placements, tuple)):\n grad_placements = tuple(grad_placements)\n return _ToTorchTensor.apply(self, grad_placements)", + "docstring": "Get the local tensor of this DTensor on its current rank. 
For sharding it returns a local shard of the logical tensor view, for replication it returns the replica on its current rank. Keyword args: grad_placements (List[:class:], optional): the placements describes the future layout of any gradient layout of the Tensor returned from this function. converts DTensor to local tensor and the returned local tensor might not be used as the original DTensor layout later in the code. This argument is the hint that user can give to autograd in case the gradient layout of the returned tensor does not match the original DTensor layout. If not specified, we will assume the gradient layout remains the same as the original DTensor and use that for gradient computation. Returns: A :class: or `DTensor` requires_grad or not.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py", + "ast_data": "FunctionDef name:to_local arg:self arguments arg arg If Call Return return:yes If BoolOp Compare Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "compute_stats", + "source_code": "def compute_stats(array):\n q1 = np.percentile(array, 25)\n q3 = np.percentile(array, 75)\n low = q1 - 1.5 * (q3 - q1)\n high = q3 + 1.5 * (q3 - q1)\n filtered_array = list(filter(lambda x: low <= x and x <= high, array))\n mean = np.mean(filtered_array)\n min_val = np.min(filtered_array)\n max_val = np.max(filtered_array)\n max_diff = max(max_val - mean, mean - min_val)\n diff = max_diff / mean * 100.0\n return (mean, diff)", + "docstring": "Reports mean and ± range for the given array. The range computation follows benchstat's. Args: array: The array to compute stats for. Returns: mean and ± %diff range.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\keras\\benchmark.py", + "ast_data": "FunctionDef name:compute_stats arg:array arguments arg Assign Call Assign Call Assign Assign Assign Call Call arguments arg BoolOp Compare Compare Assign Call Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "rot90", + "source_code": "@tf_export('image.rot90')\n@dispatch.add_dispatch_support\ndef rot90(image, k=1, name=None):\n with ops.name_scope(name, 'rot90', [image, k]) as scope:\n image = ops.convert_to_tensor(image, name='image')\n image = _AssertAtLeast3DImage(image)\n k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k')\n k.get_shape().assert_has_rank(0)\n k = math_ops.mod(k, 4)\n shape = image.get_shape()\n if shape.ndims is None:\n rank = array_ops.rank(image)\n\n def f_rank3():\n return _rot90_3D(image, k, scope)\n\n def f_rank4():\n return _rot90_4D(image, k, scope)\n return tf_cond.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n elif shape.ndims == 3:\n return _rot90_3D(image, k, scope)\n elif shape.ndims == 4:\n return _rot90_4D(image, k, scope)\n else:\n raise ValueError(\"'image' (shape %s) must have either 3 or 4 dimensions.\" % shape)", + "docstring": "Rotate image(s) by 90 degrees. For example: >>> a=tf.constant([[[1],[2]], ... [[3],[4]]]) >>> # rotating counter clockwise by 90 degrees >>> a_rot=tf.image.rot90(a) >>> print(a_rot[...,0].numpy()) [[2 4] [1 3]] >>> # rotating counter clockwise by 270 degrees >>> a_rot=tf.image.rot90(a, k=3) >>> print(a_rot[...,0].numpy()) [[3 1] [4 2]] >>> # rotating clockwise by 180 degrees >>> a_rot=tf.image.rot90(a, k=-2) >>> print(a_rot[...,0].numpy()) [[4 3] [2 1]] Args: image: 4-D Tensor of shape or 3-D Tensor of shape . k: A scalar integer tensor. 
The number of times the image(s) are rotated by 90 degrees. name: A name for this operation (optional). Returns: A rotated tensor of the same type and shape as . Raises: ValueError: if the shape of not supported.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", + "ast_data": "FunctionDef name:rot90 arg:image arg:k arg:name arguments arg arg arg With Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call If Compare Assign Call FunctionDef name:f_rank3 arguments Return return:yes Call FunctionDef name:f_rank4 arguments Return return:yes Call Return return:yes Call Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "post_compile", + "source_code": "def post_compile(self, example_inputs: Sequence[InputType], constants: CompiledFxGraphConstants, graph_kwargs: _CompileFxKwargs) -> None:\n set_tracing_context_output_strides(example_inputs, self)\n assert graph_kwargs['cudagraphs'] is not None\n assert graph_kwargs['is_backward'] is not None\n is_backward = graph_kwargs['is_backward']\n cudagraphs: BoxedBool = graph_kwargs['cudagraphs']\n if cudagraphs:\n if self.disabled_cudagraphs_reason:\n if 'cuda' in self.device_types:\n log_cudagraph_skip_and_bump_counter(f'skipping cudagraphs due to {self.disabled_cudagraphs_reason}')\n else:\n counters['inductor']['cudagraph_skips'] += 1\n BoxedBool.disable(cudagraphs)\n else:\n if is_backward:\n assert 'boxed_forward_device_index' in graph_kwargs\n boxed_forward_device_index = graph_kwargs['boxed_forward_device_index']\n else:\n boxed_forward_device_index = graph_kwargs.get('boxed_forward_device_index', None)\n if config.graph_partition:\n cudagraph_partition_post_compile(example_inputs, self, cudagraphs, constants.unwrap(self), boxed_forward_device_index)\n else:\n cudagraph_post_compile(example_inputs, self, cudagraphs, constants.unwrap(self), boxed_forward_device_index)\n inputs_to_check = self.inputs_to_check\n maybe_realign_inputs(cudagraphs, self, inputs_to_check)", + "docstring": "Run a set of post processing steps after loading from the cache. These involve: - Setting the tracing context output strides - Running cudagraphs if enabled - Realigning inputs This runs whether or not we have a cache hit, and always runs directly after we get a CompiledFxGraph. 
The results of this function are *not* saved in the cache itself.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\output_code.py", + "ast_data": "FunctionDef name:post_compile arg:self arg:example_inputs arg:constants arg:graph_kwargs arguments arg arg arg arg Call Compare Compare Assign If If If Compare Call Call If Compare Assign Assign Call If Call Call Call Call Assign Call" + }, + { + "library": "tensorflow", + "name": "update_state", + "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = math_ops.cast(y_true, self._dtype)\n y_pred = math_ops.cast(y_pred, self._dtype)\n if y_pred.shape.ndims > 1:\n y_pred = array_ops.reshape(y_pred, [-1])\n if y_true.shape.ndims > 1:\n y_true = array_ops.reshape(y_true, [-1])\n if sample_weight is not None:\n sample_weight = math_ops.cast(sample_weight, self._dtype)\n if sample_weight.shape.ndims > 1:\n sample_weight = array_ops.reshape(sample_weight, [-1])\n current_cm = confusion_matrix.confusion_matrix(y_true, y_pred, self.num_classes, weights=sample_weight, dtype=self._dtype)\n return self.total_cm.assign_add(current_cm)", + "docstring": "Accumulates the confusion matrix statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . Returns: Update op.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_and_validate_objects", + "source_code": "def set_and_validate_objects(self, object_dict):\n for key in self.all_checkpointable_objects:\n if key in object_dict:\n if not isinstance(object_dict[key], trackable.Trackable):\n raise ValueError('Object dictionary contained a non-trackable object: {} (for key {})'.format(object_dict[key], key))\n self._object_dict[key] = object_dict[key]\n setattr(self._keras_trackable, key, object_dict[key])\n else:\n raise ValueError('Object {} missing from serialized object dict.'.format(key))\n return self.checkpointable_objects", + "docstring": "Saves objects to a dictionary, and validates the values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py", + "ast_data": "FunctionDef name:set_and_validate_objects arg:self arg:object_dict arguments arg arg For If Compare If Call Raise Call Call Assign Call Raise Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "task_type", + "source_code": "@property\ndef task_type(self):\n return self._task_type", + "docstring": "Returns the role of the corresponding task.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py", + "ast_data": "FunctionDef name:task_type arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "deserialize", + "source_code": "def deserialize(proto):\n _, type_registrations = _REVIVED_TYPE_REGISTRY.get(proto.identifier, (None, None))\n if type_registrations is not None:\n for type_registration in type_registrations:\n if type_registration.should_load(proto):\n return 
(type_registration.from_proto(proto), type_registration.setter)\n return None", + "docstring": "Create a trackable object from a SavedUserObject proto. Args: proto: A SavedUserObject to deserialize. Returns: A tuple of (trackable, assignment_fn) where assignment_fn has the same signature as setattr and should be used to add dependencies to when they are available.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py", + "ast_data": "FunctionDef name:deserialize arg:proto arguments arg Assign Call If Compare For If Call Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "_variable_shape", + "source_code": "@abc.abstractproperty\ndef _variable_shape(self):\n pass", + "docstring": "of , without batch dimension.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_variable_shape arg:self arguments arg" + }, + { + "library": "scipy", + "name": "dunnett", + "source_code": "@_transition_to_rng('random_state', replace_doc=False)\ndef dunnett(*samples: 'npt.ArrayLike', control: 'npt.ArrayLike', alternative: Literal['two-sided', 'less', 'greater']='two-sided', rng: SeedType=None) -> DunnettResult:\n samples_, control_, rng = _iv_dunnett(samples=samples, control=control, alternative=alternative, rng=rng)\n rho, df, n_group, n_samples, n_control = _params_dunnett(samples=samples_, control=control_)\n statistic, std, mean_control, mean_samples = _statistic_dunnett(samples_, control_, df, n_samples, n_control)\n pvalue = _pvalue_dunnett(rho=rho, df=df, statistic=statistic, alternative=alternative, rng=rng)\n return DunnettResult(statistic=statistic, pvalue=pvalue, _alternative=alternative, _rho=rho, _df=df, _std=std, _mean_samples=mean_samples, _mean_control=mean_control, _n_samples=n_samples, _n_control=n_control, _rng=rng)", + "docstring": "Dunnett's test: multiple comparisons of means against a control group. This is an implementation of Dunnett's original, single-step test as described in [1]_. Parameters ---------- sample1, sample2, ... : 1D array_like The sample measurements for each experimental group. control : 1D array_like The sample measurements for the control group. alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. The null hypothesis is that the means of the distributions underlying the samples and control are equal. The following alternative hypotheses are available (default is 'two-sided'): * 'two-sided': the means of the distributions underlying the samples and control are unequal. * 'less': the means of the distributions underlying the samples are less than the mean of the distribution underlying the control. * 'greater': the means of the distributions underlying the samples are greater than the mean of the distribution underlying the control. rng : , optional Pseudorandom number generator state. When is None, a new is created using entropy from the operating system. 
Types other than are passed to to instantiate a `SPEC-007 numpy.random.RandomStatenumpy.random.Generatorrandom_staterngrandom_staterandom_state~scipy.stats._result_classes.DunnettResulthypothesis_dunnetttukey_hsd10.1080/01621459.1955.1050129410.1152/jappl.1969.26.5.535hypothesis_dunnett`.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_multicomp.py", + "ast_data": "FunctionDef name:dunnett arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_batch_mv", + "source_code": "def _batch_mv(bmat, bvec):\n return torch.matmul(bmat, bvec.unsqueeze(-1)).squeeze(-1)", + "docstring": "Performs a batched matrix-vector product, with compatible but different batch shapes. This function takes as input , containing :math: matrices, and , containing length :math: vectors. Both and may have any number of leading dimensions, which correspond to a batch shape. They are not necessarily assumed to have the same batch shape, just ones which can be broadcasted.", + "type": "function", + "file_path": "pytorch\\torch\\distributions\\multivariate_normal.py", + "ast_data": "FunctionDef name:_batch_mv arg:bmat arg:bvec arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "pad", + "source_code": "@tf_export.tf_export('experimental.numpy.pad', v1=[])\n@np_utils.np_doc('pad')\ndef pad(array, pad_width, mode, **kwargs):\n constant_values = kwargs.get('constant_values', 0)\n if not (mode == 'constant' or mode == 'reflect' or mode == 'symmetric'):\n raise ValueError('Unsupported padding mode: ' + mode)\n mode = mode.upper()\n array = asarray(array)\n pad_width = asarray(pad_width, dtype=dtypes.int32)\n return array_ops.pad(tensor=array, paddings=pad_width, mode=mode, constant_values=constant_values)", + "docstring": "Only supports modes 'constant', 'reflect' and 'symmetric' currently.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py", + "ast_data": "FunctionDef name:pad arg:array arg:pad_width arg:mode arguments arg arg arg arg Assign Call If BoolOp Compare Compare Compare Raise Call Assign Call Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "seaborn", + "name": "data_variable", + "source_code": "@property\ndef data_variable(self):\n if not self.univariate:\n raise AttributeError('This is not a univariate plot')\n return {'x', 'y'}.intersection(self.variables).pop()", + "docstring": "Return the variable with data for univariate plots.", + "type": "method", + "file_path": "seaborn\\seaborn\\distributions.py", + "ast_data": "FunctionDef name:data_variable arg:self arguments arg If Raise Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_exp_sinch", + "source_code": "def _exp_sinch(a, x):\n if abs(x) < 0.0135:\n x2 = x * x\n return np.exp(a) * (1 + x2 / 6.0 * (1 + x2 / 20.0 * (1 + x2 / 42.0)))\n else:\n return (np.exp(a + x) - np.exp(a - x)) / (2 * x)", + "docstring": "Stably evaluate exp(a)*sinh(x)/x Notes ----- The strategy of falling back to a sixth order Taylor expansion was suggested by the Spallation Neutron Source docs which was found on the internet by google search. The details of the cutoff point and the Horner-like evaluation was picked without reference to anything in particular. Note that sinch is not currently implemented in scipy.special, whereas the \"engineer's\" definition of sinc is implemented. 
The implementation of sinc involves a scaling factor of pi that distinguishes it from the \"mathematician's\" version of sinc.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py", + "ast_data": "FunctionDef name:_exp_sinch arg:a arg:x arguments arg arg If Compare Call Assign Return return:yes Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "summary_writer_function", + "source_code": "def summary_writer_function(name, tensor, function, family=None):\n name_scope = ops.get_name_scope()\n if name_scope:\n name_scope += '/'\n\n def record():\n with ops.name_scope(name_scope), summary_op_util.summary_scope(name, family, values=[tensor]) as (tag, scope):\n with ops.control_dependencies([function(tag, scope)]):\n return constant_op.constant(True)\n if _summary_state.writer is None:\n return control_flow_ops.no_op()\n with ops.device('cpu:0'):\n op = smart_cond.smart_cond(_legacy_contrib_should_record_summaries(), record, _nothing, name='')\n if not context.executing_eagerly():\n ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)\n return op", + "docstring": "Helper function to write summaries. Args: name: name of the summary tensor: main tensor to form the summary function: function taking a tag and a scope which writes the summary family: optional, the summary's family Returns: The result of writing the summary.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:summary_writer_function arg:name arg:tensor arg:function arg:family arguments arg arg arg arg Assign Call If FunctionDef name:record arguments With Call Call With Call Call Return return:yes Call If Compare Return return:yes Call With Call Assign Call Call If Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "close_streams", + "source_code": "@classmethod\ndef close_streams(cls, v, depth=0):\n if depth > 10:\n return\n if isinstance(v, StreamWrapper):\n v.close()\n elif isinstance(v, dict):\n for vv in v.values():\n cls.close_streams(vv, depth=depth + 1)\n elif isinstance(v, (list, tuple)):\n for vv in v:\n cls.close_streams(vv, depth=depth + 1)", + "docstring": "Traverse structure and attempts to close all found StreamWrappers on best effort basis.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\utils\\common.py", + "ast_data": "FunctionDef name:close_streams arg:cls arg:v arg:depth arguments arg arg arg If Compare Return return:no If Call Call If Call For Call Call If Call For Call" + }, + { + "library": "pytorch", + "name": "transform", + "source_code": "def transform(self, map_fn: Callable[[str], str]) -> 'FunctionCounts':\n counts: collections.defaultdict[str, int] = collections.defaultdict(int)\n for c, fn in self._data:\n counts[map_fn(fn)] += c\n return self._from_dict(counts, self.inclusive)", + "docstring": "Apply to all of the function names. This can be used to regularize function names (e.g. 
stripping irrelevant parts of the file path), coalesce entries by mapping multiple functions to the same name (in which case the counts are added together), etc.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py", + "ast_data": "FunctionDef name:transform arg:self arg:map_fn arguments arg arg Call For Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "save", + "source_code": "def save(self, name: Optional[str]=None, n_row: Optional[int]=None) -> None:\n if name is None:\n name = f'Kornia-{datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y%m%d%H%M%S')!s}.jpg'\n if len(self._output_image.shape) == 3:\n out_image = self._output_image\n if len(self._output_image.shape) == 4:\n if n_row is None:\n n_row = math.ceil(self._output_image.shape[0] ** 0.5)\n out_image = kornia.utils.image.make_grid(self._output_image, n_row, padding=2)\n kornia.io.write_image(name, out_image.mul(255.0).byte())", + "docstring": "Save the output image(s) to a directory. Args: name: Directory to save the images. n_row: Number of images displayed in each row of the grid.", + "type": "method", + "file_path": "kornia\\kornia\\core\\module.py", + "ast_data": "FunctionDef name:save arg:self arg:name arg:n_row arguments arg arg arg If Compare Assign Call Call If Compare Call Assign If Compare Call If Compare Assign Call Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "update_sub", + "source_code": "@doc_controls.do_not_generate_docs\ndef update_sub(x, decrement):\n return state_ops.assign_sub(x, decrement)", + "docstring": "Update the value of by subtracting . Args: x: A Variable. decrement: A tensor of same shape as . Returns: The variable updated.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:update_sub arg:x arg:decrement arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "generate_detector_report", + "source_code": "def generate_detector_report(self, model: nn.Module) -> tuple[str, dict[str, Any]]:\n per_channel_info = self._detect_per_channel_helper(model)\n further_optims_str = f'Further Optimizations for backend {self.backend_chosen}: \\n'\n optimizations_possible = False\n for fqn in per_channel_info:\n fqn_dict = per_channel_info[fqn]\n if fqn_dict[self.PER_CHAN_SUPPORTED_KEY] and (not fqn_dict[self.PER_CHAN_USED_KEY]):\n optimizations_possible = True\n further_optims_str += f'Module {fqn} can be configured to use per_channel quantization.\\n'\n if optimizations_possible:\n further_optims_str += 'To use per_channel quantization, make sure the qconfig has a per_channel weight observer.'\n else:\n further_optims_str += 'No further per_channel optimizations possible.'\n return (further_optims_str, per_channel_info)", + "docstring": "Checks if any Linear or Conv layers in the model utilize per_channel quantization. Only Linear and Conv layers can use per_channel as of now so only these two are currently checked. Looks at q_config format and backend to determine if per_channel can be utilized. 
Uses the DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES structure to determine support Args: model: The prepared and calibrated model we want to check if using per_channel Returns a tuple with two elements: String report of potential actions to improve model (if per_channel quantization is available in backend) Dictionary mapping per_channel quantizable elements to: whether per_channel quantization is supported by the backend if it is being utilized in the current model", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:generate_detector_report arg:self arg:model arguments arg arg Assign Call Assign Assign For Assign If BoolOp Assign If Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_bounds", + "source_code": "def get_bounds(self):\n return self._bounds", + "docstring": "Get the bounds of the spine.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\spines.py", + "ast_data": "FunctionDef name:get_bounds arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "dirac_", + "source_code": "def dirac_(tensor, groups=1):\n dimensions = tensor.ndimension()\n if dimensions not in [3, 4, 5]:\n raise ValueError('Only tensors with 3, 4, or 5 dimensions are supported')\n sizes = tensor.size()\n if sizes[0] % groups != 0:\n raise ValueError('dim 0 must be divisible by groups')\n out_chans_per_grp = sizes[0] // groups\n min_dim = min(out_chans_per_grp, sizes[1])\n with torch.no_grad():\n tensor.zero_()\n for g in range(groups):\n for d in range(min_dim):\n if dimensions == 3:\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1\n elif dimensions == 4:\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2, tensor.size(3) // 2] = 1\n else:\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2, tensor.size(3) // 2, tensor.size(4) // 2] = 1\n return tensor", + "docstring": "Fill the {3, 4, 5}-dimensional input with the Dirac delta function. Preserves the identity of the inputs in layers, where as many input channels are preserved as possible. 
In case of groups>1, each group of channels preserves identity Args: tensor: a {3, 4, 5}-dimensional groups (int, optional): number of groups in the conv layer (default: 1) Examples: >>> w = torch.empty(3, 16, 5, 5) >>> nn.init.dirac_(w) >>> w = torch.empty(3, 24, 5, 5) >>> nn.init.dirac_(w, 3)", + "type": "function", + "file_path": "pytorch\\torch\\nn\\init.py", + "ast_data": "FunctionDef name:dirac_ arg:tensor arg:groups arguments arg arg Assign Call If Compare Raise Call Assign Call If Compare Raise Call Assign Assign Call With Call Call For Call For Call If Compare Assign Call If Compare Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "SphinxInfoLogRecord", + "source_code": "class SphinxInfoLogRecord(SphinxLogRecord):\n prefix = ''", + "docstring": "Info log record class supporting location", + "type": "class", + "file_path": "sphinx\\sphinx\\util\\logging.py", + "ast_data": "ClassDef name:SphinxInfoLogRecord Assign" + }, + { + "library": "numpy", + "name": "argmax", + "source_code": "def argmax(self, axis=None, fill_value=None, out=None, *, keepdims=np._NoValue):\n if fill_value is None:\n fill_value = maximum_fill_value(self._data)\n d = self.filled(fill_value).view(ndarray)\n keepdims = False if keepdims is np._NoValue else bool(keepdims)\n return d.argmax(axis, out=out, keepdims=keepdims)", + "docstring": "Returns array of indices of the maximum values along the given axis. Masked values are treated as if they had the value fill_value. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : scalar or None, optional Value used to fill in the masked values. If None, the output of maximum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. 
Returns ------- index_array : {integer_array} Examples -------- >>> import numpy as np >>> a = np.arange(6).reshape(2,3) >>> a.argmax() 5 >>> a.argmax(0) array([1, 1, 1]) >>> a.argmax(1) array([2, 2])", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:argmax arg:self arg:axis arg:fill_value arg:out arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Assign Compare Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "is_open", + "source_code": "@property\ndef is_open(self) -> bool:\n if self._handle is None:\n return False\n return bool(self._handle.isopen)", + "docstring": "return a boolean indicating whether the file is open", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:is_open arg:self arguments arg If Compare Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_tick_space", + "source_code": "def get_tick_space(self):\n raise NotImplementedError()", + "docstring": "Return the estimated number of ticks that can fit on the axis.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_tick_space arg:self arguments arg Raise Call" + }, + { + "library": "scipy", + "name": "spline_filter1d", + "source_code": "@docfiller\ndef spline_filter1d(input, order=3, axis=-1, output=np.float64, mode='mirror'):\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = np.asarray(input)\n complex_output = np.iscomplexobj(input)\n output = _ni_support._get_output(output, input, complex_output=complex_output)\n if complex_output:\n spline_filter1d(input.real, order, axis, output.real, mode)\n spline_filter1d(input.imag, order, axis, output.imag, mode)\n return output\n if order in [0, 1]:\n output[...] = np.array(input)\n else:\n mode = _ni_support._extend_mode_to_code(mode)\n axis = normalize_axis_index(axis, input.ndim)\n _nd_image.spline_filter1d(input, order, axis, output, mode)\n return output", + "docstring": "Calculate a 1-D spline filter along the given axis. The lines of the array along the given axis are filtered by a spline filter. The order of the spline must be >= 2 and <= 5. Splines of `order > 1` require the input to be converted to B-spline coefficients first, which the interpolation functions do automatically; this behavior is controlled by the `prefilter` keyword, and for functions that accept a `mode` parameter the results are only correct if it matches the `mode` used when filtering. For complex-valued `input`, this function processes the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. Examples -------- We can filter an image using 1-D spline along the given axis: >>> from scipy.ndimage import spline_filter1d >>> import numpy as np >>> import matplotlib.pyplot as plt >>> orig_img = np.eye(20) # create an image >>> orig_img[10, :] = 1.0 >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0) >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1) >>> f, ax = plt.subplots(1, 3, sharex=True) >>> for ind, data in enumerate([[orig_img, \"original image\"], ... [sp_filter_axis_0, \"spline filter (axis=0)\"], ... [sp_filter_axis_1, \"spline filter (axis=1)\"]]): ... ax[ind].imshow(data[0], cmap='gray_r') ... 
ax[ind].set_title(data[1]) >>> plt.tight_layout() >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_interpolation.py", + "ast_data": "FunctionDef name:spline_filter1d arg:input arg:order arg:axis arg:output arg:mode arguments arg arg arg arg arg If BoolOp Compare Compare Raise Call Assign Call Assign Call Assign Call If Call Call Return return:yes If Compare Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "resize", + "source_code": "def resize(self, newshape, refcheck=True, order=False):\n errmsg = 'A masked array does not own its data and therefore cannot be resized.\\nUse the numpy.ma.resize function instead.'\n raise ValueError(errmsg)", + "docstring": ".. warning:: This method does nothing, except raise a ValueError exception. A masked array does not own its data and therefore cannot safely be resized in place. Use the function instead. This method is difficult to implement safely and may be deprecated in future releases of NumPy.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:resize arg:self arg:newshape arg:refcheck arg:order arguments arg arg arg arg Assign Raise Call" + }, + { + "library": "tensorflow", + "name": "is_aat_form", + "source_code": "def is_aat_form(operators):\n operators = list(operators)\n if not operators:\n raise ValueError('AAT form is undefined for empty operators')\n if len(operators) % 2:\n return False\n return all((is_adjoint_pair(operators[i], operators[-1 - i]) for i in range(len(operators) // 2)))", + "docstring": "Returns True if operators is of the form A @ A.H, possibly recursively.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py", + "ast_data": "FunctionDef name:is_aat_form arg:operators arguments arg Assign Call If Raise Call If Call Return return:yes Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_tf_tensorarray_set_item", + "source_code": "def _tf_tensorarray_set_item(target, i, x):\n return target.write(i, x)", + "docstring": "Overload of set_item that stages a TensorArray write.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py", + "ast_data": "FunctionDef name:_tf_tensorarray_set_item arg:target arg:i arg:x arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_clang_compiler_path", + "source_code": "def set_clang_compiler_path(environ_cp):\n default_clang_path = '/usr/lib/llvm-18/bin/clang'\n if not os.path.exists(default_clang_path):\n default_clang_path = '/usr/lib/llvm-17/bin/clang'\n if not os.path.exists(default_clang_path):\n default_clang_path = '/usr/lib/llvm-16/bin/clang'\n if not os.path.exists(default_clang_path):\n default_clang_path = shutil.which('clang') or ''\n clang_compiler_path = prompt_loop_or_load_from_env(environ_cp, var_name='CLANG_COMPILER_PATH', var_default=default_clang_path, ask_for_var='Please specify the path to clang executable.', check_success=os.path.exists, resolve_symlinks=True, error_msg='Invalid clang path. %s cannot be found. Note that TensorFlow now requires clang to compile. 
You may override this behavior by setting TF_NEED_CLANG=0')\n write_action_env_to_bazelrc('CLANG_COMPILER_PATH', clang_compiler_path)\n write_to_bazelrc('build --repo_env=CC=%s' % clang_compiler_path)\n write_to_bazelrc('build --repo_env=BAZEL_COMPILER=%s' % clang_compiler_path)\n return clang_compiler_path", + "docstring": "Set CLANG_COMPILER_PATH and environment variables. Loop over user prompts for clang path until receiving a valid response. Default is used if no input is given. Set CLANG_COMPILER_PATH and write environment variables CC and BAZEL_COMPILER to .bazelrc. Args: environ_cp: (Dict) copy of the os.environ. Returns: string value for clang_compiler_path.", + "type": "function", + "file_path": "tensorflow\\configure.py", + "ast_data": "FunctionDef name:set_clang_compiler_path arg:environ_cp arguments arg Assign If Call Assign If Call Assign If Call Assign BoolOp Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "exceeds_maximum_length_ratio", + "source_code": "def exceeds_maximum_length_ratio(password, max_similarity, value):\n pwd_len = len(password)\n length_bound_similarity = max_similarity / 2 * pwd_len\n value_len = len(value)\n return pwd_len >= 10 * value_len and value_len < length_bound_similarity", + "docstring": "Test that value is within a reasonable range of password. The following ratio calculations are based on testing SequenceMatcher like this: for i in range(0,6): print(10**i, SequenceMatcher(a='A', b='A'*(10**i)).quick_ratio()) which yields: 1 1.0 10 0.18181818181818182 100 0.019801980198019802 1000 0.001998001998001998 10000 0.00019998000199980003 100000 1.999980000199998e-05 This means a length_ratio of 10 should never yield a similarity higher than 0.2, for 100 this is down to 0.02 and for 1000 it is 0.002. This can be calculated via 2 / length_ratio. As a result we avoid the potentially expensive sequence matching.", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\password_validation.py", + "ast_data": "FunctionDef name:exceeds_maximum_length_ratio arg:password arg:max_similarity arg:value arguments arg arg arg Assign Call Assign Assign Call Return return:yes BoolOp Compare Compare" + }, + { + "library": "scipy", + "name": "modified_dogleg", + "source_code": "def modified_dogleg(A, Y, b, trust_radius, lb, ub):\n newton_point = -Y.dot(b)\n if inside_box_boundaries(newton_point, lb, ub) and norm(newton_point) <= trust_radius:\n x = newton_point\n return x\n g = A.T.dot(b)\n A_g = A.dot(g)\n cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g\n origin_point = np.zeros_like(cauchy_point)\n z = cauchy_point\n p = newton_point - cauchy_point\n _, alpha, intersect = box_sphere_intersections(z, p, lb, ub, trust_radius)\n if intersect:\n x1 = z + alpha * p\n else:\n z = origin_point\n p = cauchy_point\n _, alpha, _ = box_sphere_intersections(z, p, lb, ub, trust_radius)\n x1 = z + alpha * p\n z = origin_point\n p = newton_point\n _, alpha, _ = box_sphere_intersections(z, p, lb, ub, trust_radius)\n x2 = z + alpha * p\n if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):\n return x1\n else:\n return x2", + "docstring": "Approximately minimize ``, the upper bound for the ith component is just ignored. Returns ------- x : array_like, shape (n,) Solution to the problem. Notes ----- Based on implementations described in pp. 885-886 from [1]_. References ---------- .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 
\"An interior point algorithm for large-scale nonlinear programming.\" SIAM Journal on Optimization 9.4 (1999): 877-900.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py", + "ast_data": "FunctionDef name:modified_dogleg arg:A arg:Y arg:b arg:trust_radius arg:lb arg:ub arguments arg arg arg arg arg arg Assign Call If BoolOp Call Compare Call Assign Return return:yes Assign Call Assign Call Assign Call Call Assign Call Assign Assign Assign Call If Assign Assign Assign Assign Call Assign Assign Assign Assign Call Assign If Compare Call Call Call Call Return return:yes Return return:yes" + }, + { + "library": "seaborn", + "name": "resolve_color", + "source_code": "def resolve_color(mark: Mark, data: DataFrame | dict, prefix: str='', scales: dict[str, Scale] | None=None) -> RGBATuple | ndarray:\n color = mark._resolve(data, f'{prefix}color', scales)\n if f'{prefix}alpha' in mark._mappable_props:\n alpha = mark._resolve(data, f'{prefix}alpha', scales)\n else:\n alpha = mark._resolve(data, 'alpha', scales)\n\n def visible(x, axis=None):\n return np.array(x).dtype.kind != 'f' or np.isfinite(x).all(axis)\n if np.ndim(color) < 2 and all((isinstance(x, float) for x in color)):\n if len(color) == 4:\n return mpl.colors.to_rgba(color)\n alpha = alpha if visible(color) else np.nan\n return mpl.colors.to_rgba(color, alpha)\n else:\n if np.ndim(color) == 2 and color.shape[1] == 4:\n return mpl.colors.to_rgba_array(color)\n alpha = np.where(visible(color, axis=1), alpha, np.nan)\n return mpl.colors.to_rgba_array(color, alpha)", + "docstring": "Obtain a default, specified, or mapped value for a color feature. This method exists separately to support the relationship between a color and its corresponding alpha. We want to respect alpha values that are passed in specified (or mapped) color values but also make use of a separate variable, which can be mapped. This approach may also be extended to support mapping of specific color channels (i.e. luminance, chroma) in the future. Parameters ---------- mark : Mark with the color property. data : Container with data values for features that will be semantically mapped. 
prefix : Support \"color\", \"fillcolor\", etc.", + "type": "function", + "file_path": "seaborn\\seaborn\\_marks\\base.py", + "ast_data": "FunctionDef name:resolve_color arg:mark arg:data arg:prefix arg:scales arguments arg arg arg arg Assign Call If Compare Assign Call Assign Call FunctionDef name:visible arg:x arg:axis arguments arg arg Return return:yes BoolOp Compare Call Call Call If BoolOp Compare Call Call Call If Compare Call Return return:yes Call Assign Call Return return:yes Call If BoolOp Compare Call Compare Return return:yes Call Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_oas", + "source_code": "def _oas(X, *, assume_centered=False):\n if len(X.shape) == 2 and X.shape[1] == 1:\n if not assume_centered:\n X = X - X.mean()\n return (np.atleast_2d((X ** 2).mean()), 0.0)\n n_samples, n_features = X.shape\n emp_cov = empirical_covariance(X, assume_centered=assume_centered)\n alpha = np.mean(emp_cov ** 2)\n mu = np.trace(emp_cov) / n_features\n mu_squared = mu ** 2\n num = alpha + mu_squared\n den = (n_samples + 1) * (alpha - mu_squared / n_features)\n shrinkage = 1.0 if den == 0 else min(num / den, 1.0)\n shrunk_cov = (1.0 - shrinkage) * emp_cov\n shrunk_cov.flat[::n_features + 1] += shrinkage * mu\n return (shrunk_cov, shrinkage)", + "docstring": "Estimate covariance with the Oracle Approximating Shrinkage algorithm. The formulation is based on [1]_. [1] \"Shrinkage algorithms for MMSE covariance estimation.\", Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\covariance\\_shrunk_covariance.py", + "ast_data": "FunctionDef name:_oas arg:X arguments arg arg If BoolOp Compare Call Compare If Assign Call Return return:yes Call Call Assign Assign Call Assign Call Assign Call Assign Assign Assign Assign Compare Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "IsLoopExit", + "source_code": "def IsLoopExit(op):\n return op.type == 'Exit' or op.type == 'RefExit'", + "docstring": "Return true if is an Exit.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py", + "ast_data": "FunctionDef name:IsLoopExit arg:op arguments arg Return return:yes BoolOp Compare Compare" + }, + { + "library": "tensorflow", + "name": "dot", + "source_code": "def dot(inputs, axes, normalize=False, **kwargs):\n return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)", + "docstring": "Functional interface to the layer. Args: inputs: A list of input tensors (at least 2). axes: Integer or tuple of integers, axis or axes along which to take the dot product. normalize: Whether to L2-normalize samples along the dot product axis before taking the dot product. If set to True, then the output of the dot product is the cosine proximity between the two samples. **kwargs: Standard layer keyword arguments. 
Returns: A tensor, the dot product of the samples from the inputs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py", + "ast_data": "FunctionDef name:dot arg:inputs arg:axes arg:normalize arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "check", + "source_code": "def check(self, app_configs):\n if app_configs is None:\n app_configs = apps.get_app_configs()\n app_configs = set(app_configs)\n errors = []\n modeladmins = (o for o in self._registry.values() if o.__class__ is not ModelAdmin)\n for modeladmin in modeladmins:\n if modeladmin.model._meta.app_config in app_configs:\n errors.extend(modeladmin.check())\n return errors", + "docstring": "Run the system checks on all ModelAdmins, except if they aren't customized at all.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\sites.py", + "ast_data": "FunctionDef name:check arg:self arg:app_configs arguments arg arg If Compare Assign Call Assign Call Assign Assign Call Compare For If Compare Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_minimize_cobyqa", + "source_code": "def _minimize_cobyqa(fun, x0, args=(), bounds=None, constraints=(), callback=None, disp=False, maxfev=None, maxiter=None, f_target=-np.inf, feasibility_tol=1e-08, initial_tr_radius=1.0, final_tr_radius=1e-06, scale=False, **unknown_options):\n from .._lib.cobyqa import minimize\n _check_unknown_options(unknown_options)\n options = {'disp': bool(disp), 'maxfev': int(maxfev) if maxfev is not None else 500 * len(x0), 'maxiter': int(maxiter) if maxiter is not None else 1000 * len(x0), 'target': float(f_target), 'feasibility_tol': float(feasibility_tol), 'radius_init': float(initial_tr_radius), 'radius_final': float(final_tr_radius), 'scale': bool(scale)}\n with COBYQA_LOCK:\n return minimize(fun, x0, args, bounds, constraints, callback, options)", + "docstring": "Minimize a scalar function of one or more variables using the Constrained Optimization BY Quadratic Approximations (COBYQA) algorithm [1]_. .. versionadded:: 1.14.0 Options ------- disp : bool Set to True to print information about the optimization procedure. Default is ``False``. maxfev : int Maximum number of function evaluations. maxiter : int Maximum number of iterations. f_target : float Target value for the objective function. feasibility_tol : float Absolute tolerance for the constraint violation. initial_tr_radius, final_tr_radius : float Initial and final trust-region radii; `final_tr_radius` overrides the value of `tol` in `minimize`. scale : bool If True, the variables are scaled according to the bounds; when all bounds are finite they are scaled to lie within ``[-1, 1]``. References ---------- .. 
[1] COBYQA", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_cobyqa_py.py", + "ast_data": "FunctionDef name:_minimize_cobyqa arg:fun arg:x0 arg:args arg:bounds arg:constraints arg:callback arg:disp arg:maxfev arg:maxiter arg:f_target arg:feasibility_tol arg:initial_tr_radius arg:final_tr_radius arg:scale arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Assign Call Compare Call Call Compare Call Call Call Call Call Call Call With Return return:yes Call" + }, + { + "library": "seaborn", + "name": "set_color_codes", + "source_code": "def set_color_codes(palette='deep'):\n if palette == 'reset':\n colors = [(0.0, 0.0, 1.0), (0.0, 0.5, 0.0), (1.0, 0.0, 0.0), (0.75, 0.0, 0.75), (0.75, 0.75, 0.0), (0.0, 0.75, 0.75), (0.0, 0.0, 0.0)]\n elif not isinstance(palette, str):\n err = 'set_color_codes requires a named seaborn palette'\n raise TypeError(err)\n elif palette in SEABORN_PALETTES:\n if not palette.endswith('6'):\n palette = palette + '6'\n colors = SEABORN_PALETTES[palette] + [(0.1, 0.1, 0.1)]\n else:\n err = f\"Cannot set colors with palette '{palette}'\"\n raise ValueError(err)\n for code, color in zip('bgrmyck', colors):\n rgb = mpl.colors.colorConverter.to_rgb(color)\n mpl.colors.colorConverter.colors[code] = rgb", + "docstring": "Change how matplotlib color shorthands are interpreted. Calling this will change how shorthand codes like \"b\" or \"g\" are interpreted by matplotlib in subsequent plots. Parameters ---------- palette : {deep, muted, pastel, dark, bright, colorblind} Named seaborn palette to use as the source of colors. See Also -------- set : Color codes can be set through the high-level seaborn style manager. set_palette : Color codes can also be set through the function that sets the matplotlib color cycle.", + "type": "function", + "file_path": "seaborn\\seaborn\\palettes.py", + "ast_data": "FunctionDef name:set_color_codes arg:palette arguments arg If Compare Assign If Call Assign Raise Call If Compare If Call Assign Assign Assign Raise Call For Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "compute_gradients", + "source_code": "def compute_gradients(self, loss, var_list=None, gate_gradients=optimizer.Optimizer.GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, grad_loss=None):\n loss = self._scale_loss(loss)\n grads_and_vars = self._optimizer.compute_gradients(loss=loss, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss)\n grads = [g for g, _ in grads_and_vars]\n variables = [v for _, v in grads_and_vars]\n unscaled_grads = self._unscale_grads(grads)\n return list(zip(unscaled_grads, variables))", + "docstring": "Compute gradients of for the variables in . This adjusts the dynamic range of the gradient evaluation by scaling up the value. The gradient values are then scaled back down by the reciprocal of the loss scale. This is useful in reduced precision training where small gradient values would otherwise underflow the representable range. Args: loss: A Tensor containing the value to minimize or a callable taking no arguments which returns the value to minimize. When eager execution is enabled it must be a callable. var_list: Optional list or tuple of to update to minimize . Defaults to the list of variables collected in the graph under the key . gate_gradients: How to gate the computation of gradients. Can be , , or . 
aggregation_method: Specifies the method used to combine gradient terms. Valid values are defined in the class . colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. grad_loss: Optional. A holding the gradient computed for . Returns: A list of (gradient, variable) pairs. Variable is always present, but gradient can be .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:compute_gradients arg:self arg:loss arg:var_list arg:gate_gradients arg:aggregation_method arg:colocate_gradients_with_ops arg:grad_loss arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Assign Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_regular_normalize_batch_in_training", + "source_code": "def _regular_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n mean, var = nn.moments(x, reduction_axes, None, None, False)\n normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)\n return (normed, mean, var)", + "docstring": "Non-fused version of . Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple length of 3, .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_regular_normalize_batch_in_training arg:x arg:gamma arg:beta arg:reduction_axes arg:epsilon arguments arg arg arg arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "is_gray", + "source_code": "def is_gray(self):\n if not self._isinit:\n self._init()\n return np.all(self._lut[:, 0] == self._lut[:, 1]) and np.all(self._lut[:, 0] == self._lut[:, 2])", + "docstring": "Return whether the colormap is grayscale.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:is_gray arg:self arguments arg If Call Return return:yes BoolOp Call Compare Call Compare" + }, + { + "library": "authlib", + "name": "is_revoked", + "source_code": "def is_revoked(self):\n raise NotImplementedError()", + "docstring": "A method to define if this token is revoked. 
For instance, there is a boolean column `` in the table:: def is_revoked(self): return self.revoked :return: boolean", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py", + "ast_data": "FunctionDef name:is_revoked arg:self arguments arg Raise Call" + }, + { + "library": "seaborn", + "name": "_lookup_single", + "source_code": "def _lookup_single(self, key, attr=None):\n if attr is None:\n value = self.lookup_table[key]\n else:\n value = self.lookup_table[key][attr]\n return value", + "docstring": "Get attribute(s) for a given data point.", + "type": "method", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "FunctionDef name:_lookup_single arg:self arg:key arg:attr arguments arg arg arg If Compare Assign Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X, return_std=False):\n y_mean = self._decision_function(X)\n if not return_std:\n return y_mean\n else:\n sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)\n y_std = np.sqrt(sigmas_squared_data + 1.0 / self.alpha_)\n return (y_mean, y_std)", + "docstring": "Predict using the linear model. In addition to the mean of the predictive distribution, also its standard deviation can be returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. return_std : bool, default=False Whether to return the standard deviation of posterior prediction. Returns ------- y_mean : array-like of shape (n_samples,) Mean of predictive distribution of query points. y_std : array-like of shape (n_samples,) Standard deviation of predictive distribution of query points.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_bayes.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arg:return_std arguments arg arg arg Assign Call If Return return:yes Assign Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "LSTMCell", + "source_code": "class LSTMCell(RNNCellBase):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, num_chunks=4, **kwargs)\n\n def _get_name(self):\n return 'DynamicQuantizedLSTMCell'\n\n def forward(self, input: Tensor, hx: Optional[tuple[Tensor, Tensor]]=None) -> tuple[Tensor, Tensor]:\n self.check_forward_input(input)\n if hx is None:\n zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)\n hx = (zeros, zeros)\n self.check_forward_hidden(input, hx[0], '[0]')\n self.check_forward_hidden(input, hx[1], '[1]')\n return torch.ops.quantized.quantized_lstm_cell_dynamic(input, hx, self._packed_weight_ih, self._packed_weight_hh, self.bias_ih, self.bias_hh)\n\n @classmethod\n def from_float(cls, mod, use_precomputed_fake_quant=False):\n return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)", + "docstring": "A long short-term memory (LSTM) cell. A dynamic quantized LSTMCell module with floating point tensor as inputs and outputs. Weights are quantized to 8 bits. We adopt the same interface as , please see for documentation. Examples:: >>> # xdoctest: +SKIP >>> rnn = nn.LSTMCell(10, 20) >>> input = torch.randn(6, 3, 10) >>> hx = torch.randn(3, 20) >>> cx = torch.randn(3, 20) >>> output = [] >>> for i in range(6): ... hx, cx = rnn(input[i], (hx, cx)) ... 
output.append(hx)", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\dynamic\\modules\\rnn.py", + "ast_data": "ClassDef name:LSTMCell FunctionDef name:__init__ arg:self arguments arg arg arg Call Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arg:hx arguments arg arg arg Call If Compare Assign Call Call Assign Call Call Return return:yes Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "copy", + "source_code": "def copy(self, deep: bool | Literal['all']=True) -> Self:\n if deep:\n\n def copy_func(ax):\n return ax.copy(deep=True) if deep == 'all' else ax.view()\n new_axes = [copy_func(ax) for ax in self.axes]\n else:\n new_axes = [ax.view() for ax in self.axes]\n res = self.apply('copy', deep=deep)\n res.axes = new_axes\n if self.ndim > 1:\n blknos = self._blknos\n if blknos is not None:\n res._blknos = blknos.copy()\n res._blklocs = self._blklocs.copy()\n if deep:\n res._consolidate_inplace()\n return res", + "docstring": "Make deep or shallow copy of BlockManager Parameters ---------- deep : bool, string or None, default True If False or None, return a shallow copy (do not copy data) If 'all', copy data and a deep copy of the index Returns ------- BlockManager", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:copy arg:self arg:deep arguments arg arg If FunctionDef name:copy_func arg:ax arguments arg Return return:yes Compare Call Call Assign Call Assign Call Assign Call Assign If Compare Assign If Compare Assign Call Assign Call If Call Return return:yes" + }, + { + "library": "pytorch", + "name": "save_model_states", + "source_code": "def save_model_states(state_dict, sparsified_model_dump_path, save_file_name, sparse_block_shape, norm, zip=True):\n folder_name = os.path.join(sparsified_model_dump_path, str(norm))\n folder_str = f'config_{sparse_block_shape}'\n model_state = state_dict['state_dict']\n model_state_path = os.path.join(folder_name, folder_str, save_file_name)\n os.makedirs(os.path.dirname(model_state_path), exist_ok=True)\n torch.save(model_state, model_state_path)\n if zip:\n zip_path = model_state_path.replace('.ckpt', '.zip')\n with ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip:\n zip.write(model_state_path, save_file_name)\n os.remove(model_state_path)\n model_state_path = zip_path\n model_state_path = os.path.abspath(model_state_path)\n file_size = os.path.getsize(model_state_path)\n file_size = file_size >> 20\n return (model_state_path, file_size)", + "docstring": "Dumps the state_dict() of the model. Args: state_dict (Dict) The state_dict() as dumped by dlrm_s_pytorch.py. Only the model state will be extracted from this dictionary. This corresponds to the 'state_dict' key in the state_dict dictionary. >>> model_state = state_dict['state_dict'] save_file_name (str) The filename (not path) when saving the model state dictionary sparse_block_shape (Tuple) The block shape corresponding to the data norm sparsifier. **Used for creating save directory** norm (str) type of norm (L1, L2) for the datanorm sparsifier. 
**Used for creating save directory** zip (bool) if True, the file is zip-compressed.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\evaluate_disk_savings.py", + "ast_data": "FunctionDef name:save_model_states arg:state_dict arg:sparsified_model_dump_path arg:save_file_name arg:sparse_block_shape arg:norm arg:zip arguments arg arg arg arg arg arg Assign Call Call Assign Assign Assign Call Call Call Call If Assign Call With Call Call Call Assign Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "@available_if(_estimator_has('predict'))\ndef predict(self, X, **params):\n check_is_fitted(self)\n _raise_for_params(params, self, 'predict')\n if _routing_enabled():\n routed_params = process_routing(self, 'predict', **params)\n else:\n routed_params = Bunch(estimator=Bunch(predict={}))\n X = validate_data(self, X, accept_sparse=True, ensure_all_finite=False, reset=False)\n return self.estimator_.predict(X, **routed_params.estimator.predict)", + "docstring": "Predict the classes of `X`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. **params : dict of str -> object Parameters to pass to the underlying estimator's ``predict`` method, only available if `enable_metadata_routing=True`. See the Metadata Routing User Guide for more details. Returns ------- y : ndarray of shape (n_samples,) Array with predicted labels.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\semi_supervised\\_self_training.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg Call Call If Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "maybe_infer_ndim", + "source_code": "def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:\n if ndim is None:\n if not isinstance(values.dtype, np.dtype):\n if len(placement) != 1:\n ndim = 1\n else:\n ndim = 2\n else:\n ndim = values.ndim\n return ndim", + "docstring": "If `ndim` is not provided, infer it from placement and values.", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\api.py", + "ast_data": "FunctionDef name:maybe_infer_ndim arg:values arg:placement arg:ndim arguments arg arg arg If Compare If Call If Compare Call Assign Assign Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_validate_apply_axis_arg", + "source_code": "def _validate_apply_axis_arg(arg: NDFrame | Sequence | np.ndarray, arg_name: str, dtype: Any | None, data: NDFrame) -> np.ndarray:\n dtype = {'dtype': dtype} if dtype else {}\n if isinstance(arg, Series) and isinstance(data, DataFrame):\n raise ValueError(f\"'{arg_name}' is a Series but underlying data for operations is a DataFrame since 'axis=None'\")\n if isinstance(arg, DataFrame) and isinstance(data, Series):\n raise ValueError(f\"'{arg_name}' is a DataFrame but underlying data for operations is a Series with 'axis in [0,1]'\")\n if isinstance(arg, (Series, DataFrame)):\n arg = arg.reindex_like(data).to_numpy(**dtype)\n else:\n arg = np.asarray(arg, **dtype)\n assert isinstance(arg, np.ndarray)\n if arg.shape != data.shape:\n raise ValueError(f\"supplied '{arg_name}' is not correct shape for data over selected 'axis': got {arg.shape}, expected {data.shape}\")\n return arg", + "docstring": "For the apply-type methods, where ``arg`` is used together with the underlying ``data``, we must make sure that the two are compatible shapes, or raise. 
Parameters ---------- arg : sequence, Series or DataFrame the user input arg arg_name : string name of the arg for use in error messages dtype : numpy dtype, optional forced numpy dtype if given data : Series or DataFrame underling subset of Styler data on which operations are performed Returns ------- ndarray", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style.py", + "ast_data": "FunctionDef name:_validate_apply_axis_arg arg:arg arg:arg_name arg:dtype arg:data arguments arg arg arg arg Assign If BoolOp Call Call Raise Call If BoolOp Call Call Raise Call If Call Assign Call Call Assign Call Call If Compare Raise Call Return return:yes" + }, + { + "library": "scrapy", + "name": "memoizemethod_noargs", + "source_code": "def memoizemethod_noargs(method: Callable[Concatenate[_SelfT, _P], _T]) -> Callable[Concatenate[_SelfT, _P], _T]:\n cache: weakref.WeakKeyDictionary[_SelfT, _T] = weakref.WeakKeyDictionary()\n\n @wraps(method)\n def new_method(self: _SelfT, *args: _P.args, **kwargs: _P.kwargs) -> _T:\n if self not in cache:\n cache[self] = method(self, *args, **kwargs)\n return cache[self]\n return new_method", + "docstring": "Decorator to cache the result of a method (without arguments) using a weak reference to its object", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\python.py", + "ast_data": "FunctionDef name:memoizemethod_noargs arg:method arguments arg Call FunctionDef name:new_method arg:self arguments arg arg arg If Compare Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "is_array_api_strict_namespace", + "source_code": "def is_array_api_strict_namespace(xp: Namespace) -> bool:\n return xp.__name__ == 'array_api_strict'", + "docstring": "Returns True if is an array-api-strict namespace. See Also -------- array_namespace is_numpy_namespace is_cupy_namespace is_torch_namespace is_ndonnx_namespace is_dask_namespace is_jax_namespace is_pydata_sparse_namespace", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", + "ast_data": "FunctionDef name:is_array_api_strict_namespace arg:xp arguments arg Return return:yes Compare" + }, + { + "library": "django", + "name": "get_fields", + "source_code": "def get_fields(self, include_parents=True, include_hidden=False):\n if include_parents is False:\n include_parents = PROXY_PARENTS\n return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)", + "docstring": "Return a list of fields associated to the model. By default, include forward and reverse fields, fields derived from inheritance, but not hidden fields. The returned fields can be changed using the parameters: - include_parents: include fields derived from inheritance - include_hidden: include fields that have a related_name that starts with a \"+\"", + "type": "method", + "file_path": "django\\django\\db\\models\\options.py", + "ast_data": "FunctionDef name:get_fields arg:self arg:include_parents arg:include_hidden arguments arg arg arg If Compare Assign Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_na_values", + "source_code": "def get_na_values(col, na_values, na_fvalues, keep_default_na: bool):\n if isinstance(na_values, dict):\n if col in na_values:\n return (na_values[col], na_fvalues[col])\n else:\n if keep_default_na:\n return (STR_NA_VALUES, set())\n return (set(), set())\n else:\n return (na_values, na_fvalues)", + "docstring": "Get the NaN values for a given column. 
Parameters ---------- col : str The name of the column. na_values : array-like, dict The object listing the NaN values as strings. na_fvalues : array-like, dict The object listing the NaN values as floats. keep_default_na : bool If is a dict, and the column is not mapped in the dictionary, whether to return the default NaN values or the empty set. Returns ------- nan_tuple : A length-two tuple composed of 1) na_values : the string NaN values for that column. 2) na_fvalues : the float NaN values for that column.", + "type": "function", + "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py", + "ast_data": "FunctionDef name:get_na_values arg:col arg:na_values arg:na_fvalues arg:keep_default_na arguments arg arg arg arg If Call If Compare Return return:yes If Return return:yes Call Return return:yes Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "parse_stop_word", + "source_code": "def parse_stop_word(source: str) -> set[str]:\n result: set[str] = set()\n for line in source.splitlines():\n line = line.split('|')[0]\n result.update(line.split())\n return result", + "docstring": "Collect the stopwords from a snowball style word list: .. code:: text list of space separated stop words | optional comment", + "type": "function", + "file_path": "sphinx\\sphinx\\search\\__init__.py", + "ast_data": "FunctionDef name:parse_stop_word arg:source arguments arg Call For Call Assign Call Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "create_token_response", + "source_code": "@hooked\ndef create_token_response(self):\n refresh_token = self.request.refresh_token\n user = self.authenticate_user(refresh_token)\n if not user:\n raise InvalidRequestError(\"There is no 'user' for this token.\")\n client = self.request.client\n token = self.issue_token(user, refresh_token)\n log.debug('Issue token %r to %r', token, client)\n self.request.user = user\n self.save_token(token)\n self.revoke_old_credential(refresh_token)\n return (200, token, self.TOKEN_RESPONSE_HEADER)", + "docstring": "If valid and authorized, the authorization server issues an access token as described in Section 5.1. 
If the request failed verification or is invalid, the authorization server returns an error response as described in Section 5.2.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\refresh_token.py", + "ast_data": "FunctionDef name:create_token_response arg:self arguments arg Assign Assign Call If Raise Call Assign Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_major_locator", + "source_code": "def get_major_locator(self):\n return self.major.locator", + "docstring": "Get the locator of the major ticker.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_major_locator arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "insert_records", + "source_code": "def insert_records(self, table: SQLTable, con, frame, name: str, index: bool | str | list[str] | None=True, schema=None, chunksize: int | None=None, method=None, **engine_kwargs) -> int | None:\n raise AbstractMethodError(self)", + "docstring": "Inserts data into already-prepared table", + "type": "method", + "file_path": "pandas\\pandas\\io\\sql.py", + "ast_data": "FunctionDef name:insert_records arg:self arg:table arg:con arg:frame arg:name arg:index arg:schema arg:chunksize arg:method arguments arg arg arg arg arg arg arg arg arg arg Raise Call" + }, + { + "library": "kornia", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n repr = f'gain={self.gain}, sign={self.sign}'\n return repr", + "docstring": "Return a string representation of the object.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\linear_illumination.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "isgenerator", + "source_code": "def isgenerator(object):\n return _inspect.isgenerator(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.isgenerator.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:isgenerator arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "get_laf_pts_to_draw", + "source_code": "def get_laf_pts_to_draw(LAF: Tensor, img_idx: int=0) -> Tuple[List[int], List[int]]:\n KORNIA_CHECK_LAF(LAF)\n pts = laf_to_boundary_points(LAF[img_idx:img_idx + 1])[0]\n pts_np = pts.detach().permute(1, 0, 2).cpu()\n return (pts_np[..., 0].tolist(), pts_np[..., 1].tolist())", + "docstring": "Return list for drawing LAFs (local features). Args: LAF: :math: img_idx: which points to output. 
Returns: List of boundary points x, y` Examples: x, y = get_laf_pts_to_draw(LAF, img_idx) plt.figure() plt.imshow(kornia.utils.tensor_to_image(img[img_idx])) plt.plot(x, y, 'r') plt.show()", + "type": "function", + "file_path": "kornia\\kornia\\feature\\laf.py", + "ast_data": "FunctionDef name:get_laf_pts_to_draw arg:LAF arg:img_idx arguments arg arg Call Assign Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "relu", + "source_code": "def relu(input: Tensor, inplace: bool=False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(relu, (input,), input, inplace=inplace)\n if inplace:\n result = torch.relu_(input)\n else:\n result = torch.relu(input)\n return result", + "docstring": "relu(input, inplace=False) -> Tensor Applies the rectified linear unit function element-wise. See :class: for more details.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:relu arg:input arg:inplace arguments arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "XFrameOptionsMiddleware", + "source_code": "class XFrameOptionsMiddleware(MiddlewareMixin):\n\n def process_response(self, request, response):\n if response.get('X-Frame-Options') is not None:\n return response\n if getattr(response, 'xframe_options_exempt', False):\n return response\n response.headers['X-Frame-Options'] = self.get_xframe_options_value(request, response)\n return response\n\n def get_xframe_options_value(self, request, response):\n return getattr(settings, 'X_FRAME_OPTIONS', 'DENY').upper()", + "docstring": "Set the X-Frame-Options HTTP header in HTTP responses. Do not set the header if it's already set or if the response contains a xframe_options_exempt value set to True. By default, set the X-Frame-Options header to 'DENY', meaning the response cannot be displayed in a frame, regardless of the site attempting to do so. To enable the response to be loaded on a frame within the same site, set X_FRAME_OPTIONS in your project's Django settings to 'SAMEORIGIN'.", + "type": "class", + "file_path": "django\\django\\middleware\\clickjacking.py", + "ast_data": "ClassDef name:XFrameOptionsMiddleware FunctionDef name:process_response arg:self arg:request arg:response arguments arg arg arg If Compare Call Return return:yes If Call Return return:yes Assign Call Return return:yes FunctionDef name:get_xframe_options_value arg:self arg:request arg:response arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__tf_unflatten__", + "source_code": "@classmethod\ndef __tf_unflatten__(cls, metadata, components):\n pass", + "docstring": "Create a user-defined object from (metadata, components). Args: metadata: a custom Python object that stands for the static config for reconstructing a new object of the current class. components: a that contains the dynamic data fields of the current class, for object reconstruction. Returns: The user-defined object, with the same class of the current object. Implementation Note: - This method should not invoke any TensorFlow ops. - This method only needs to unflatten the current level. 
If the object has an attribute that also need custom unflattening, nest functions will utilize this method to do recursive unflattening.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\custom_nest_protocol.py", + "ast_data": "FunctionDef name:__tf_unflatten__ arg:cls arg:metadata arg:components arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "visit", + "source_code": "def visit(unused_path, unused_parent, children):\n for child in children:\n _, attr = tf_decorator.unwrap(child[1])\n api_names_v1 = [name for name in tf_export.get_v1_names(attr) if '.__internal__.' not in name]\n api_names_v2 = tf_export.get_v2_names(attr)\n if not api_names_v2:\n api_names_v2 = [name for name in api_names_v1 if name in all_v2_names]\n deprecated_api_names = set(api_names_v1) - set(api_names_v2)\n for name in deprecated_api_names:\n renames.add((name, get_canonical_name(api_names_v2, name)))", + "docstring": "Visitor that collects rename strings to add to rename_line_set.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_renames_map.py", + "ast_data": "FunctionDef name:visit arg:unused_path arg:unused_parent arg:children arguments arg arg arg For Assign Call Assign Call Compare Assign Call If Assign Compare Assign Call Call For Call Call" + }, + { + "library": "scipy", + "name": "_skip_if_dtype", + "source_code": "def _skip_if_dtype(arg):\n if isinstance(arg, str):\n return None\n if type(arg) is type:\n return None if issubclass(arg, np.generic) else arg\n else:\n return None if isinstance(arg, np.dtype) else arg", + "docstring": "'array or dtype' polymorphism. Return None for np.int8, dtype('float32') or 'f' etc arg for np.empty(3) etc", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_ni_support.py", + "ast_data": "FunctionDef name:_skip_if_dtype arg:arg arguments arg If Call Return return:no If Compare Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "sparse_mask", + "source_code": "@tf_export('sparse.mask', v1=['sparse.mask', 'sparse_mask'])\n@deprecation.deprecated_endpoints('sparse_mask')\ndef sparse_mask(a, mask_indices, name=None):\n with ops.name_scope(name, 'sparse_mask', [a, mask_indices]) as name:\n indices = a.indices\n out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)\n out_values = gather(a.values, to_gather, name=name)\n return indexed_slices.IndexedSlices(out_values, out_indices, a.dense_shape)", + "docstring": "Masks elements of . Given an instance , returns another that contains a subset of the slices of . Only the slices at indices not specified in are returned. This is useful when you need to extract a subset of slices in an object. For example: Args: a: An instance. mask_indices: Indices of elements to mask. name: A name for the operation (optional). 
Returns: The masked instance.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:sparse_mask arg:a arg:mask_indices arg:name arguments arg arg arg With Call Assign Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "next", + "source_code": "def next(self, filename: str, lineno: int, instruction_pointer, inst) -> SpeculationEntry:\n if len(self.entries) == self.index:\n self.entries.append(SpeculationEntry(filename, lineno, instruction_pointer, inst))\n entry = self.entries[self.index]\n prev_entry_msg = ''\n if self.index != 0:\n prev_entry = self.entries[self.index - 1]\n prev_entry_msg = f'Previous instruction: {prev_entry.filename}:{prev_entry.lineno}({prev_entry.inst.opname} @ {prev_entry.instruction_pointer})\\n'\n if not (entry.instruction_pointer == instruction_pointer and entry.filename == filename and (entry.lineno == lineno)):\n raise SpeculationLogDivergence(f'\\nSpeculationLog diverged at index {self.index} (log had {len(self.entries)} entries):\\n- Expected: {entry.filename}:{entry.lineno} ({entry.inst.opname} at ip={entry.instruction_pointer})\\n- Actual: {filename}:{lineno} ({inst.opname} at ip={instruction_pointer})\\n{prev_entry_msg}\\nThere are two usual reasons why this may have occured:\\n- When Dynamo analysis restarted, the second run took a different path than\\n the first. If this occurred, the previous instruction is the critical instruction that\\n behaved differently.\\n- Speculation entries are only added under certain conditions (as seen in\\n step()), e.g., there must exist operators in the graph; those conditions may\\n have changed on restart.\\n\\nIf this divergence was intentional, clear the speculation log before restarting (do NOT\\ndo this for graph breaks, you will infinite loop).\\n\\nOtherwise, please submit a bug report, ideally including the contents of TORCH_LOGS=+dynamo\\n')\n self.index += 1\n return entry", + "docstring": "Lookup or create a SpeculationEntry() that is shared across RestartAnalysis calls. 
Args are used only for debug checks.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py", + "ast_data": "FunctionDef name:next arg:self arg:filename arg:lineno arg:instruction_pointer arg:inst arguments arg arg arg arg arg If Compare Call Call Call Assign Assign If Compare Assign Assign If BoolOp Compare Compare Compare Raise Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "as_list", + "source_code": "def as_list(self):\n return self._flattened_inputs", + "docstring": "Returning the inputs as a list.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:as_list arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_TileGrad", + "source_code": "@ops.RegisterGradient('Tile')\ndef _TileGrad(op: ops.Operation, grad):\n input_shape = array_ops.shape(op.inputs[0], out_type=op.inputs[1].dtype)\n split_shape = array_ops.reshape(array_ops.transpose(array_ops_stack.stack([op.inputs[1], input_shape])), [-1])\n axes = math_ops.range(0, array_ops.size(split_shape), 2)\n if isinstance(grad, indexed_slices_lib.IndexedSlices):\n input_shape_0 = math_ops.cast(input_shape[0], grad.indices.dtype)\n grad = math_ops.unsorted_segment_sum(grad.values, math_ops.mod(grad.indices, input_shape_0), input_shape_0)\n split_shape = array_ops.concat([[1], split_shape[1:]], axis=0)\n input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)\n if not context.executing_eagerly():\n input_grad.set_shape(op.inputs[0].get_shape())\n return [input_grad, None]", + "docstring": "Sum reduces grad along the tiled dimensions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py", + "ast_data": "FunctionDef name:_TileGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Call Call Assign Call Call If Call Assign Call Assign Call Call Assign Call Assign Call Call If Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "tpu_hardware_feature", + "source_code": "@property\ndef tpu_hardware_feature(self):\n if self._tpu_topology is None:\n return self._tpu_topology\n return self._tpu_topology.tpu_hardware_feature", + "docstring": "Returns the tpu topology info stored.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py", + "ast_data": "FunctionDef name:tpu_hardware_feature arg:self arguments arg If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "process_exists", + "source_code": "def process_exists(self, task_type, task_id):\n return self.get_process_exit_code(task_type, task_id) is None", + "docstring": "Returns whether the subprocess still exists given the task type and id. Args: task_type: The task type. task_id: The task id. Returns: Boolean; whether the subprocess still exists. 
If the subprocess has exited, this returns False.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py", + "ast_data": "FunctionDef name:process_exists arg:self arg:task_type arg:task_id arguments arg arg arg Return return:yes Compare Call" + }, + { + "library": "scikit-learn", + "name": "predict_log_proba", + "source_code": "def predict_log_proba(self, X):\n scores = self._decision_function(X)\n log_likelihood = scores - scores.max(axis=1)[:, np.newaxis]\n return log_likelihood - np.log(np.exp(log_likelihood).sum(axis=1)[:, np.newaxis])", + "docstring": "Estimate log class probabilities. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- y_log_proba : ndarray of shape (n_samples, n_classes) Estimated log probabilities.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py", + "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "load_state", + "source_code": "def load_state(model: nn.Module, weights: Sequence[Tensor], weight_names: Sequence[str], buffers: Sequence[Tensor]=(), buffer_names: Sequence[str]=()) -> nn.Module:\n assert len(weight_names) == len(weights)\n load_weights(model, weight_names, weights)\n if len(buffers) > 0:\n assert len(buffer_names) == len(buffers)\n load_buffers(model, buffer_names, buffers)\n return model", + "docstring": "load_state(model, weights, weight_names, buffers=(), buffer_names=()) -> model load_state takes and and assigns them to the model. This is the inverse operation of .", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\make_functional.py", + "ast_data": "FunctionDef name:load_state arg:model arg:weights arg:weight_names arg:buffers arg:buffer_names arguments arg arg arg arg arg Compare Call Call Call If Compare Call Compare Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "Hinge", + "source_code": "class Hinge(LossFunctionWrapper):\n\n def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'):\n super().__init__(hinge, name=name, reduction=reduction)", + "docstring": "Computes the hinge loss between and . values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.Hinge() >>> h(y_true, y_pred).numpy() 1.3 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.55 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.Hinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 2.6 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.Hinge( ... 
reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.1, 1.5], dtype=float32) Usage with the API:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "ClassDef name:Hinge FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "is_function", + "source_code": "def is_function(fname, graph):\n if context.executing_eagerly():\n return context.context().has_function(fname)\n else:\n while graph is not None:\n if graph._is_function(fname):\n return True\n if hasattr(graph, 'outer_graph'):\n graph = graph.outer_graph\n else:\n return False", + "docstring": "Checks for a function definition with in the current context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\function_def_to_graph.py", + "ast_data": "FunctionDef name:is_function arg:fname arg:graph arguments arg arg If Call Return return:yes Call Call While Compare If Call Return return:yes If Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_num_buckets", + "source_code": "@abc.abstractproperty\ndef _num_buckets(self):\n pass", + "docstring": "Returns number of buckets in this sparse feature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_num_buckets arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "Repeat", + "source_code": "@dataclass\nclass Repeat(DimSpec):\n input_dim: DimSpec\n times: int\n\n @classmethod\n def new(cls, dim: DimSpec, times: int) -> DimSpec:\n if times == 1:\n return dim\n elif isinstance(dim, Singleton):\n return Broadcast(dim, times)\n else:\n return Repeat(dim, times)\n\n def inputs(self) -> Iterable[DimSpec]:\n return (self.input_dim,)", + "docstring": "Output dimension is the input dimension repeated n-times.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py", + "ast_data": "ClassDef name:Repeat FunctionDef name:new arg:cls arg:dim arg:times arguments arg arg arg If Compare Return return:yes If Call Return return:yes Call Return return:yes Call FunctionDef name:inputs arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "left_multiply", + "source_code": "def left_multiply(J, d, copy=True):\n if copy and (not isinstance(J, LinearOperator)):\n J = J.copy()\n if issparse(J):\n J.data *= np.repeat(d, np.diff(J.indptr))\n elif isinstance(J, LinearOperator):\n J = left_multiplied_operator(J, d)\n else:\n J *= d[:, np.newaxis]\n return J", + "docstring": "Compute diag(d) J. If is False, is modified in place (unless being LinearOperator).", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:left_multiply arg:J arg:d arg:copy arguments arg arg arg If BoolOp Call Assign Call If Call Call Call If Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "get_script_prefix", + "source_code": "def get_script_prefix():\n return getattr(_prefixes, 'value', '/')", + "docstring": "Return the currently active script prefix. 
Useful for client code that wishes to construct their own URLs manually (although accessing the request instance is normally going to be a lot cleaner).", + "type": "function", + "file_path": "django\\django\\urls\\base.py", + "ast_data": "FunctionDef name:get_script_prefix arguments Return return:yes Call" + }, + { + "library": "scrapy", + "name": "iter_spider_classes", + "source_code": "def iter_spider_classes(module: ModuleType) -> Iterable[type[Spider]]:\n for obj in vars(module).values():\n if inspect.isclass(obj) and issubclass(obj, Spider) and (obj.__module__ == module.__name__) and getattr(obj, 'name', None):\n yield obj", + "docstring": "Return an iterator over all spider classes defined in the given module that can be instantiated (i.e. which have name)", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\spider.py", + "ast_data": "FunctionDef name:iter_spider_classes arg:module arguments arg For Call Call If BoolOp Call Call Compare Call" + }, + { + "library": "pandas", + "name": "_getitem_iterable", + "source_code": "def _getitem_iterable(self, key, axis: AxisInt):\n self._validate_key(key, axis)\n keyarr, indexer = self._get_listlike_indexer(key, axis)\n return self.obj._reindex_with_indexers({axis: [keyarr, indexer]}, allow_dups=True)", + "docstring": "Index current object with an iterable collection of keys. Parameters ---------- key : iterable Targeted labels. axis : int Dimension on which the indexing is being made. Raises ------ KeyError If no key was found. Will change in the future to raise if not all keys were found. Returns ------- scalar, DataFrame, or Series: indexed value(s).", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:_getitem_iterable arg:self arg:key arg:axis arguments arg arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "main", + "source_code": "def main():\n if not os.path.exists('release'):\n os.makedirs('release')\n write_release_task(os.path.join('release', 'README'))\n write_log_task(os.path.join('release', 'Changelog'))", + "docstring": "Checks weather release directory is present or not and calls the method to generate logs and notes", + "type": "function", + "file_path": "scipy\\tools\\write_release_and_log.py", + "ast_data": "FunctionDef name:main arguments If Call Call Call Call Call Call" + }, + { + "library": "django", + "name": "find", + "source_code": "def find(self, path, find_all=False, **kwargs):\n if kwargs:\n find_all = self._check_deprecated_find_param(find_all=find_all, **kwargs)\n matches = []\n for prefix, root in self.locations:\n if root not in searched_locations:\n searched_locations.append(root)\n matched_path = self.find_location(root, path, prefix)\n if matched_path:\n if not find_all:\n return matched_path\n matches.append(matched_path)\n return matches", + "docstring": "Look for files in the extra locations as defined in STATICFILES_DIRS.", + "type": "method", + "file_path": "django\\django\\contrib\\staticfiles\\finders.py", + "ast_data": "FunctionDef name:find arg:self arg:path arg:find_all arguments arg arg arg arg If Assign Call Assign For If Compare Call Assign Call If If Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_assert_float_dtype", + "source_code": "def _assert_float_dtype(dtype):\n dtype = dtypes.as_dtype(dtype)\n if not dtype.is_floating:\n raise ValueError('Expected floating point type, got %s.' 
% dtype)\n return dtype", + "docstring": "Validate and return floating point type based on . must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if is not a floating point type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "FunctionDef name:_assert_float_dtype arg:dtype arguments arg Assign Call If Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "device", + "source_code": "@property\ndef device(self):\n return self.values.device", + "docstring": "The name of the device on which will be produced, or .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py", + "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_cached_or_new", + "source_code": "@classmethod\ndef _get_cached_or_new(cls):\n return cls._get_cached_or_new_impl(cls._build_latex_header())", + "docstring": "Return the previous LatexManager if the header and tex system did not change, or a new instance otherwise.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py", + "ast_data": "FunctionDef name:_get_cached_or_new arg:cls arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "time_count_neighbors", + "source_code": "def time_count_neighbors(self, mn1n2, p, probe_radius, boxsize, leafsize, cls):\n if cls != 'cKDTree_weighted':\n self.T1.count_neighbors(self.T2, probe_radius, p=p)\n else:\n self.T1.count_neighbors(self.T2, probe_radius, weights=(self.w1, self.w2), p=p)", + "docstring": "Count neighbors kd-tree dim | # points T1 | # points T2 | p | probe radius | BoxSize | LeafSize | cls", + "type": "method", + "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py", + "ast_data": "FunctionDef name:time_count_neighbors arg:self arg:mn1n2 arg:p arg:probe_radius arg:boxsize arg:leafsize arg:cls arguments arg arg arg arg arg arg arg If Compare Call Call" + }, + { + "library": "scipy", + "name": "_get_support", + "source_code": "def _get_support(self, *args, **kwargs):\n return (self.a, self.b)", + "docstring": "Return the support of the (unscaled, unshifted) distribution. *Must* be overridden by distributions which have support dependent upon the shape parameters of the distribution. Any such override *must not* set or change any of the class members, as these members are shared amongst all instances of the distribution. Parameters ---------- arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- a, b : numeric (float, or int or +/-np.inf) end-points of the distribution's support for the specified shape parameters.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:_get_support arg:self arguments arg arg arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_bounds", + "source_code": "def set_bounds(self, *args):\n if len(args) == 1:\n l, b, w, h = args[0]\n else:\n l, b, w, h = args\n self._x = l\n self._y = b\n self._width = w\n self._height = h\n self.stale = True", + "docstring": "Set the bounds of the rectangle. 
Call signatures:: set_bounds(left, bottom, width, height) set_bounds((left, bottom, width, height)) Parameters ---------- left, bottom : float The coordinates of the bottom left corner of the rectangle. width, height : float The width/height of the rectangle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_bounds arg:self arguments arg arg If Compare Call Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "_get_overloaded_args", + "source_code": "def _get_overloaded_args(relevant_args: Iterable[Any], get_type_fn: Optional[Callable[[Any], type]]=None) -> list[Any]:\n if get_type_fn is None:\n get_type_fn = type\n if not torch._C._is_torch_function_enabled():\n return []\n overloaded_types: set[type] = set()\n overloaded_args: list[Any] = []\n for arg in relevant_args:\n arg_type = get_type_fn(arg)\n if arg_type not in overloaded_types and hasattr(arg_type, '__torch_function__') and (arg_type.__torch_function__ != torch._C._disabled_torch_function_impl):\n if overloaded_types:\n overloaded_types.add(arg_type)\n index = len(overloaded_args)\n for i, old_arg in enumerate(overloaded_args):\n if issubclass(arg_type, get_type_fn(old_arg)):\n index = i\n break\n overloaded_args.insert(index, arg)\n else:\n overloaded_types = {arg_type}\n overloaded_args = [arg]\n return overloaded_args", + "docstring": "Returns a list of arguments on which to call __torch_function__. Checks arguments in relevant_args for __torch_function__ implementations, storing references to the arguments and their types in overloaded_args and overloaded_types in order of calling precedence. Only distinct types are considered. If a type is a subclass of another type it will have higher precedence, otherwise the precedence order is the same as the order of arguments in relevant_args, that is, from left-to-right in the argument list. The precedence-determining algorithm implemented in this function is described in _. See torch::append_overloaded_arg for the equivalent function in the C++ implementation. Parameters ---------- relevant_args : iterable of array-like Iterable of array-like arguments to check for __torch_function__ methods. get_type_fn : callable, optional Function to call on each argument in relevant_args to get its type. Returns ------- overloaded_args : list Arguments from relevant_args on which to call __torch_function__ methods, in the order in which they should be called. .. _NEP-0018:", + "type": "function", + "file_path": "pytorch\\torch\\overrides.py", + "ast_data": "FunctionDef name:_get_overloaded_args arg:relevant_args arg:get_type_fn arguments arg arg If Compare Assign If Call Return return:no Call For Assign Call If BoolOp Compare Call Compare If Call Assign Call For Call If Call Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "convert", + "source_code": "@_export_metrics\ndef convert(self):\n if self.experimental_lower_to_saved_model:\n saved_model_convert_result = self._convert_as_saved_model()\n if saved_model_convert_result:\n return saved_model_convert_result\n graph_def, input_tensors, output_tensors, frozen_func = self._freeze_concrete_function()\n graph_def = self._optimize_tf_model(graph_def, input_tensors, output_tensors, frozen_func)\n return super(TFLiteFrozenGraphConverterV2, self).convert(graph_def, input_tensors, output_tensors)", + "docstring": "Converts a TensorFlow GraphDef based on instance variables. 
Returns: The converted data in serialized format. Raises: ValueError: No concrete function is specified. Multiple concrete functions are specified. Input shape is not specified. Invalid quantization parameters.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:convert arg:self arguments arg If Assign Call If Return return:yes Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_CoreLocation", + "source_code": "class _CoreLocation:\n\n def __init__(self, x: int=0, y: int=0, z: int=0, core: int=0):\n self.x = x\n self.y = y\n self.z = z\n self.core = core\n\n def __eq__(self, other):\n if not isinstance(other, _CoreLocation):\n return False\n return self.x == other.x and self.y == other.y and (self.z == other.z) and (self.core == other.core)\n\n def __ne__(self, other):\n if not isinstance(other, _CoreLocation):\n return True\n return not self == other\n\n def __hash__(self):\n return hash((self.x, self.y, self.z, self.core))\n\n def __repr__(self):\n return f'{type(self).__name__}(x={self.x}, y={self.y}, z={self.z}, core={self.core})'\n\n def to_list(self):\n return [self.x, self.y, self.z, self.core]", + "docstring": "Represents a TPU core's location in the mesh.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py", + "ast_data": "ClassDef name:_CoreLocation FunctionDef name:__init__ arg:self arg:x arg:y arg:z arg:core arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes BoolOp Compare Compare Compare Compare FunctionDef name:__ne__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:to_list arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "subplots_adjust", + "source_code": "def subplots_adjust(self, left=None, bottom=None, right=None, top=None, wspace=None, hspace=None):\n if self.get_layout_engine() is not None and (not self.get_layout_engine().adjust_compatible):\n _api.warn_external('This figure was using a layout engine that is incompatible with subplots_adjust and/or tight_layout; not calling subplots_adjust.')\n return\n self.subplotpars.update(left, bottom, right, top, wspace, hspace)\n for ax in self.axes:\n if ax.get_subplotspec() is not None:\n ax._set_position(ax.get_subplotspec().get_position(self))\n self.stale = True", + "docstring": "Adjust the subplot layout parameters. Unset parameters are left unmodified; initial values are given by :rc:. .. plot:: _embedded_plots/figure_subplots_adjust.py Parameters ---------- left : float, optional The position of the left edge of the subplots, as a fraction of the figure width. right : float, optional The position of the right edge of the subplots, as a fraction of the figure width. bottom : float, optional The position of the bottom edge of the subplots, as a fraction of the figure height. top : float, optional The position of the top edge of the subplots, as a fraction of the figure height. wspace : float, optional The width of the padding between subplots, as a fraction of the average Axes width. 
hspace : float, optional The height of the padding between subplots, as a fraction of the average Axes height.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:subplots_adjust arg:self arg:left arg:bottom arg:right arg:top arg:wspace arg:hspace arguments arg arg arg arg arg arg arg If BoolOp Compare Call Call Call Return return:no Call For If Compare Call Call Call Call Assign" + }, + { + "library": "pytorch", + "name": "_enable_cp_dispatcher", + "source_code": "@contextlib.contextmanager\ndef _enable_cp_dispatcher() -> Generator[None, None, None]:\n old_handlers = DTensor._op_dispatcher._custom_op_handlers\n DTensor._op_dispatcher._custom_op_handlers = {**old_handlers, **customized_ops}\n yield\n DTensor._op_dispatcher._custom_op_handlers = old_handlers", + "docstring": "Enables DTensor dispatcher to dispatch SDPA to CP.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py", + "ast_data": "FunctionDef name:_enable_cp_dispatcher arguments Assign Assign Assign" + }, + { + "library": "virtualenv", + "name": "ExePathRefToDest", + "source_code": "class ExePathRefToDest(PathRefToDest, ExePathRef):\n\n def __init__(self, src, targets, dest, must=RefMust.NA, when=RefWhen.ANY) -> None:\n ExePathRef.__init__(self, src, must, when)\n PathRefToDest.__init__(self, src, dest, must, when)\n if not self.FS_CASE_SENSITIVE:\n targets = list(OrderedDict(((i.lower(), None) for i in targets)).keys())\n self.base = targets[0]\n self.aliases = targets[1:]\n self.dest = dest\n\n def run(self, creator, symlinks):\n bin_dir = self.dest(creator, self.src).parent\n dest = bin_dir / self.base\n method = self.method(symlinks)\n method(self.src, dest)\n if not symlinks:\n make_exe(dest)\n for extra in self.aliases:\n link_file = bin_dir / extra\n if link_file.exists():\n link_file.unlink()\n if symlinks:\n link_file.symlink_to(self.base)\n else:\n copy(self.src, link_file)\n if not symlinks:\n make_exe(link_file)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(src={self.src}, alias={self.aliases})'", + "docstring": "Link a exe path on the file system.", + "type": "class", + "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\ref.py", + "ast_data": "ClassDef name:ExePathRefToDest FunctionDef name:__init__ arg:self arg:src arg:targets arg:dest arg:must arg:when arguments arg arg arg arg arg arg Call Call If Assign Call Call Call Call Assign Assign Assign FunctionDef name:run arg:self arg:creator arg:symlinks arguments arg arg arg Assign Call Assign Assign Call Call If Call For Assign If Call Call If Call Call If Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "local_variables", + "source_code": "@property\ndef local_variables(self):\n if self._variables_created:\n return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, self.variable_scope_name)\n else:\n return []", + "docstring": "Returns the list of global variables created by the Template.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", + "ast_data": "FunctionDef name:local_variables arg:self arguments arg If Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "coords", + "source_code": "def coords(self, device_idx: int) -> tensor.Tensor:\n strides = ops.convert_to_tensor(self.strides)\n shape = ops.convert_to_tensor(self.shape())\n return device_idx // strides % 
shape", + "docstring": "Converts the device index into a tensor of mesh coordinates.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:coords arg:self arg:device_idx arguments arg arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "convert_conv3d_weight_memory_format", + "source_code": "def convert_conv3d_weight_memory_format(module: _M, memory_format: torch.memory_format) -> _M:\n if isinstance(module, (torch.nn.Conv3d, torch.nn.ConvTranspose3d)):\n weight_data = module.weight.detach().clone(memory_format=memory_format)\n module.weight.data = weight_data.resize_(weight_data.size(), memory_format=memory_format)\n for child in module.children():\n convert_conv3d_weight_memory_format(child, memory_format)\n return module", + "docstring": "Convert `` Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG) >>> input = torch.randint(1, 10, (2, 8, 4, 4, 4), dtype=torch.float16, device=\"cuda\") >>> model = nn.Sequential( >>> nn.Conv3d(8, 4, 3)).cuda().half() >>> # This is identical to: >>> # nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last_3d) >>> model = nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last_3d) >>> out = model(input)", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\memory_format.py", + "ast_data": "FunctionDef name:convert_conv3d_weight_memory_format arg:module arg:memory_format arguments arg arg If Call Assign Call Call Assign Call Call For Call Call Return return:yes" + }, + { + "library": "virtualenv", + "name": "from_bundle", + "source_code": "def from_bundle(distribution, version, for_py_version, search_dirs, app_data, do_periodic_update, env):\n of_version = Version.of_version(version)\n wheel = load_embed_wheel(app_data, distribution, for_py_version, of_version)\n if version != Version.embed:\n if app_data.can_update:\n per = do_periodic_update\n wheel = periodic_update(distribution, of_version, for_py_version, wheel, search_dirs, app_data, per, env)\n found_wheel = from_dir(distribution, of_version, for_py_version, search_dirs)\n if found_wheel is not None and (wheel is None or found_wheel.version_tuple > wheel.version_tuple):\n wheel = found_wheel\n return wheel", + "docstring": "Load the bundled wheel to a cache directory.", + "type": "function", + "file_path": "virtualenv\\src\\virtualenv\\seed\\wheels\\bundle.py", + "ast_data": "FunctionDef name:from_bundle arg:distribution arg:version arg:for_py_version arg:search_dirs arg:app_data arg:do_periodic_update arg:env arguments arg arg arg arg arg arg arg Assign Call Assign Call If Compare If Assign Assign Call Assign Call If BoolOp Compare BoolOp Compare Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "reduce_mean", + "source_code": "def reduce_mean(self, x):\n return self.reduce(lambda y: math_ops.reduce_mean(y, axis=0), x)", + "docstring": "Performs a mean reduction on across pfor iterations. Note that this currently may not work inside a control flow construct. Args: x: an unvectorized Tensor. Returns: A Tensor that has same rank as . 
The value is the mean of the values of across the pfor iterations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:reduce_mean arg:self arg:x arguments arg arg Return return:yes Call arguments arg Call" + }, + { + "library": "pandas", + "name": "describe_categorical_1d", + "source_code": "def describe_categorical_1d(data: Series, percentiles_ignored: Sequence[float]) -> Series:\n names = ['count', 'unique', 'top', 'freq']\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n if count_unique > 0:\n top, freq = (objcounts.index[0], objcounts.iloc[0])\n dtype = None\n else:\n top, freq = (np.nan, np.nan)\n dtype = 'object'\n result = [data.count(), count_unique, top, freq]\n from pandas import Series\n return Series(result, index=names, name=data.name, dtype=dtype)", + "docstring": "Describe series containing categorical data. Parameters ---------- data : Series Series to be described. percentiles_ignored : list-like of numbers Ignored, but in place to unify interface.", + "type": "function", + "file_path": "pandas\\pandas\\core\\methods\\describe.py", + "ast_data": "FunctionDef name:describe_categorical_1d arg:data arg:percentiles_ignored arguments arg arg Assign Assign Call Assign Call Compare If Compare Assign Assign Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_init_from_args", + "source_code": "def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop, swap_memory, name):\n if not isinstance(parallel_iterations, int) or parallel_iterations <= 0:\n raise ValueError(\"'parallel_iterations' must be a positive integer: %s\" % parallel_iterations)\n self._name = ops.get_default_graph().unique_name(name)\n self._maximum_iterations = maximum_iterations\n self._parallel_iterations = parallel_iterations\n self._back_prop = back_prop\n self._swap_memory = swap_memory\n self._pivot_for_pred = None\n self._pivot_for_body = None\n self._pivot = None\n self._loop_exits = []\n self._loop_enters = []\n self._graph = ops.get_default_graph()", + "docstring": "Creates a new from arguments. Args: maximum_iterations: Optional upper bound on number of loop iterations. parallel_iterations: The number of iterations allowed to run in parallel. back_prop: Whether backprop is enabled for this while loop. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. name: Optional name prefix for the returned tensors. Raises: ValueError: If has invalid value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:_init_from_args arg:self arg:maximum_iterations arg:parallel_iterations arg:back_prop arg:swap_memory arg:name arguments arg arg arg arg arg arg If BoolOp Call Compare Raise Call Assign Call Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call" + }, + { + "library": "matplotlib", + "name": "tick_values", + "source_code": "def tick_values(self, vmin, vmax):\n if self.nbins is None:\n return self.locs\n step = max(int(np.ceil(len(self.locs) / self.nbins)), 1)\n ticks = self.locs[::step]\n for i in range(1, step):\n ticks1 = self.locs[i::step]\n if np.abs(ticks1).min() < np.abs(ticks).min():\n ticks = ticks1\n return self.raise_if_exceeds(ticks)", + "docstring": "Return the locations of the ticks. .. 
note:: Because the values are fixed, *vmin* and *vmax* are not used.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:tick_values arg:self arg:vmin arg:vmax arguments arg arg arg If Compare Return return:yes Assign Call Call Call Call Assign For Call Assign If Compare Call Call Call Call Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "cumfreq", + "source_code": "def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):\n h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)\n cumhist = np.cumsum(h * 1, axis=0)\n return CumfreqResult(cumhist, l, b, e)", + "docstring": "Return a cumulative frequency histogram, using the histogram function. A cumulative histogram is a mapping that counts the cumulative number of observations in all of the bins up to the specified bin. Parameters ---------- a : array_like Input array. numbins : int, optional The number of bins to use for the histogram. Default is 10. defaultreallimits : tuple (lower, upper), optional The lower and upper values for the range of the histogram. If no value is given, a range slightly larger than the range of the values in is used. Specifically `a`. Default is None, which gives each value a weight of 1.0 Returns ------- cumcount : ndarray Binned values of cumulative frequency. lowerlimit : float Lower real limit binsize : float Width of each bin. extrapoints : int Extra points. Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> rng = np.random.default_rng() >>> x = [1, 4, 2, 1, 3, 1] >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) >>> res.cumcount array([ 1., 2., 3., 3.]) >>> res.extrapoints 3 Create a normal distribution with 1000 random values >>> samples = stats.norm.rvs(size=1000, random_state=rng) Calculate cumulative frequencies >>> res = stats.cumfreq(samples, numbins=25) Calculate space of values for x >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size, ... res.cumcount.size) Plot histogram and cumulative histogram >>> fig = plt.figure(figsize=(10, 4)) >>> ax1 = fig.add_subplot(1, 2, 1) >>> ax2 = fig.add_subplot(1, 2, 2) >>> ax1.hist(samples, bins=25) >>> ax1.set_title('Histogram') >>> ax2.bar(x, res.cumcount, width=res.binsize) >>> ax2.set_title('Cumulative histogram') >>> ax2.set_xlim([x.min(), x.max()]) >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:cumfreq arg:a arg:numbins arg:defaultreallimits arg:weights arguments arg arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_rewrite_spec_if_needed", + "source_code": "def _rewrite_spec_if_needed(spec: shard_spec.ShardingSpec, tensor: torch.Tensor, rank: int) -> shard_spec.ShardingSpec:\n if not isinstance(spec, ChunkShardingSpec):\n return spec\n rewrite = False\n for p in spec.placements:\n p = cast(_remote_device, p)\n if p.rank() == rank and p.device() != tensor.device:\n rewrite = True\n break\n if rewrite:\n spec = copy.deepcopy(spec)\n for i, placement in enumerate(spec.placements):\n placement = cast(_remote_device, placement)\n if placement.rank() == rank and placement.device() != tensor.device:\n spec.placements[i] = _remote_device(f'rank:{rank}/{tensor.device}')\n return spec", + "docstring": "Rewrite ``. 
FSDP.sharded_optim_state_dict sneakly ships optimizer state to CPU so if the original ShardingSpec produces CUDA metadata, ST construction bombs.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\fsdp.py", + "ast_data": "FunctionDef name:_rewrite_spec_if_needed arg:spec arg:tensor arg:rank arguments arg arg arg If Call Return return:yes Assign For Assign Call If BoolOp Compare Call Compare Call Assign If Assign Call For Call Assign Call If BoolOp Compare Call Compare Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "resampled", + "source_code": "def resampled(self, lutshape):\n if not np.iterable(lutshape) or len(lutshape) != len(self):\n raise ValueError(f'lutshape must be of length {len(self)}')\n new_cmap = self.copy()\n for i, s in enumerate(lutshape):\n if s is not None:\n new_cmap._colormaps[i] = self[i].resampled(s)\n return new_cmap", + "docstring": "Return a new colormap with *lutshape* entries. Parameters ---------- lutshape : tuple of (, ) The tuple must have a length matching the number of variates. For each element in the tuple, if , the corresponding colorbar is resampled, if , the corresponding colorbar is not resampled. Returns ------- MultivarColormap", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:resampled arg:self arg:lutshape arguments arg arg If BoolOp Call Compare Call Call Raise Call Call Assign Call For Call If Compare Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "_create_octave_mask", + "source_code": "def _create_octave_mask(mask: Tensor, octave_shape: List[int]) -> Tensor:\n mask_shape = octave_shape[-2:]\n mask_octave = F.interpolate(mask, mask_shape, mode='bilinear', align_corners=False)\n return mask_octave.unsqueeze(1)", + "docstring": "Downsample a mask based on the given octave shape.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\scale_space_detector.py", + "ast_data": "FunctionDef name:_create_octave_mask arg:mask arg:octave_shape arguments arg arg Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "call", + "source_code": "def call(self, method_name: str, args: Optional[Sequence[core_tf_types.Tensor]]=None, output_specs=None, timeout_in_ms=0):\n if args is None:\n args = []\n status_or, deleter = gen_rpc_ops.rpc_call(self._client_handle, args=nest.flatten(args), method_name=method_name, timeout_in_ms=timeout_in_ms)\n return StatusOrResult(status_or, deleter, output_specs)", + "docstring": "Method to invoke remote registered functions on the connected server. Server should be started before making an RPC Call. Args: method_name: Registered method to invoke on Server. args: Input arguments for the method. output_specs: Output specs for the output from method. timeout_in_ms: Timeout for this call. If 0, default client timeout will be used. Returns: StatusOrResult object. This function issues the RPC call to server, it does not block for the duration of RPC. 
Please call is_ok, get_error or get_value methods on the returned object to blocked till RPC finishes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py", + "ast_data": "FunctionDef name:call arg:self arg:method_name arg:args arg:output_specs arg:timeout_in_ms arguments arg arg arg arg arg If Compare Assign Assign Call Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "MutableAsyncChain", + "source_code": "class MutableAsyncChain(AsyncIterator[_T]):\n\n def __init__(self, *args: Iterable[_T] | AsyncIterator[_T]):\n self.data: AsyncIterator[_T] = _async_chain(*args)\n\n def extend(self, *iterables: Iterable[_T] | AsyncIterator[_T]) -> None:\n self.data = _async_chain(self.data, _async_chain(*iterables))\n\n def __aiter__(self) -> Self:\n return self\n\n async def __anext__(self) -> _T:\n return await self.data.__anext__()", + "docstring": "Similar to MutableChain but for async iterables", + "type": "class", + "file_path": "scrapy\\scrapy\\utils\\python.py", + "ast_data": "ClassDef name:MutableAsyncChain FunctionDef name:__init__ arg:self arguments arg arg Call FunctionDef name:extend arg:self arguments arg arg Assign Call Call FunctionDef name:__aiter__ arg:self arguments arg Return return:yes AsyncFunctionDef name:__anext__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "generate_table", + "source_code": "def generate_table(self) -> tuple[dict[str, tuple[int, int]], DataFrame]:\n gso_table = self._gso_table\n gso_df = self.df\n columns = list(gso_df.columns)\n selected = gso_df[self.columns]\n col_index = [(col, columns.index(col)) for col in self.columns]\n keys = np.empty(selected.shape, dtype=np.uint64)\n for o, (idx, row) in enumerate(selected.iterrows()):\n for j, (col, v) in enumerate(col_index):\n val = row[col]\n val = '' if isna(val) else val\n key = gso_table.get(val, None)\n if key is None:\n key = (v + 1, o + 1)\n gso_table[val] = key\n keys[o, j] = self._convert_key(key)\n for i, col in enumerate(self.columns):\n gso_df[col] = keys[:, i]\n return (gso_table, gso_df)", + "docstring": "Generates the GSO lookup table for the DataFrame Returns ------- gso_table : dict Ordered dictionary using the string found as keys and their lookup position (v,o) as values gso_df : DataFrame DataFrame where strl columns have been converted to (v,o) values Notes ----- Modifies the DataFrame in-place. The DataFrame returned encodes the (v,o) values as uint64s. The encoding depends on the dta version, and can be expressed as enc = v + o * 2 ** (o_size * 8) so that v is stored in the lower bits and o is in the upper bits. o_size is * 117: 4 * 118: 6 * 119: 5", + "type": "method", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:generate_table arg:self arguments arg Assign Assign Assign Call Assign Assign Call Assign Call For Call Call For Call Assign Assign Call Assign Call If Compare Assign Assign Assign Call For Call Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "_clean_exit", + "source_code": "def _clean_exit(self):\n if self.state != states.EXITING:\n warnings.warn('The main thread is exiting, but the Bus is in the %r state; shutting it down automatically now. You must either call bus.block() after start(), or call bus.exit() before the main thread exits.' 
% self.state, RuntimeWarning)\n self.exit()", + "docstring": "Assert that the Bus is not running in atexit handler callback.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\wspbus.py", + "ast_data": "FunctionDef name:_clean_exit arg:self arguments arg If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "_NextAfterGrad", + "source_code": "@ops.RegisterGradient('NextAfter')\ndef _NextAfterGrad(op: ops.Operation, grad):\n x1 = op.inputs[0]\n x2 = op.inputs[1]\n s_x1 = array_ops.shape(x1)\n s_x2 = array_ops.shape(x2)\n r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)\n with ops.control_dependencies([grad]):\n partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)\n partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)\n return (array_ops.reshape(math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1), array_ops.reshape(math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))", + "docstring": "Returns gradient of nextafter(x1, x2) with respect to x1 and x2.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_NextAfterGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Assign Call Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_inputs_with_flattening", + "source_code": "def _inputs_with_flattening(pfor_input: _PforInput, input_indices):\n if input_indices is None:\n input_indices = []\n pfor_input.stack_inputs(stack_indices=input_indices)\n inputs = []\n for i in range(pfor_input.num_inputs):\n if i in input_indices:\n inp = pfor_input.stacked_input(i)\n inp = _flatten_first_two_dims(inp)\n else:\n inp = pfor_input.unstacked_input(i)\n inputs.append(inp)\n return inputs", + "docstring": "Stacks and flattens first dim of inputs at indices .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:_inputs_with_flattening arg:pfor_input arg:input_indices arguments arg arg If Compare Assign Call Assign For Call If Compare Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "ELU", + "source_code": "class ELU(torch.nn.ELU):\n\n def __init__(self, scale, zero_point, alpha=1.0):\n super().__init__(alpha)\n self.scale = scale\n self.zero_point = zero_point\n\n def forward(self, input):\n return torch.ao.nn.quantized.functional.elu(input, self.scale, self.zero_point, self.alpha)\n\n def _get_name(self):\n return 'QuantizedELU'\n\n @staticmethod\n def from_float(mod, use_precomputed_fake_quant=False):\n scale, zero_point = mod.activation_post_process.calculate_qparams()\n return ELU(float(scale), int(zero_point), mod.alpha)\n\n @classmethod\n def from_reference(cls, mod, scale, zero_point):\n return cls(float(scale), int(zero_point), mod.alpha)", + "docstring": "This is the quantized equivalent of :class:. 
Args: scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor alpha: the alpha constant", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\activation.py", + "ast_data": "ClassDef name:ELU FunctionDef name:__init__ arg:self arg:scale arg:zero_point arg:alpha arguments arg arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:mod arg:use_precomputed_fake_quant arguments arg arg Assign Call Return return:yes Call Call Call FunctionDef name:from_reference arg:cls arg:mod arg:scale arg:zero_point arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "id_for_label", + "source_code": "def id_for_label(self, id_):\n return id_", + "docstring": "Return the HTML ID attribute of this Widget for use by a , given the ID of the field. Return an empty string if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags.", + "type": "method", + "file_path": "django\\django\\forms\\widgets.py", + "ast_data": "FunctionDef name:id_for_label arg:self arg:id_ arguments arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "download", + "source_code": "def download(name: str, output_dir: str, url: str, reference_bin_hash: str) -> bool:\n binary_path = Path(output_dir, name)\n if check(binary_path, reference_bin_hash):\n logging.info('Correct binary already exists at %s. Exiting.', binary_path)\n return True\n binary_path.parent.mkdir(parents=True, exist_ok=True)\n logging.info('Downloading %s to %s', url, binary_path)\n if DRY_RUN:\n logging.info('Exiting as there is nothing left to do in dry run mode')\n return True\n urllib.request.urlretrieve(url, binary_path, reporthook=report_download_progress if sys.stdout.isatty() else None)\n logging.info('Downloaded %s successfully.', name)\n if not check(binary_path, reference_bin_hash):\n logging.critical('Downloaded binary %s failed its hash check', name)\n return False\n mode = os.stat(binary_path).st_mode\n mode |= stat.S_IXUSR\n os.chmod(binary_path, mode)\n logging.info('Using %s located at %s', name, binary_path)\n return True", + "docstring": "Download a platform-appropriate binary if one doesn't already exist at the expected location and verifies that it is the right binary by checking its SHA256 hash against the expected hash.", + "type": "function", + "file_path": "pytorch\\tools\\linter\\adapters\\s3_init.py", + "ast_data": "FunctionDef name:download arg:name arg:output_dir arg:url arg:reference_bin_hash arguments arg arg arg arg Assign Call If Call Call Return return:yes Call Call If Call Return return:yes Call Call Call If Call Call Return return:yes Assign Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "TemplateError", + "source_code": "class TemplateError(Exception):\n\n def __init__(self, message, position, name=None):\n Exception.__init__(self, message)\n self.position = position\n self.name = name\n\n def __str__(self):\n msg = ' '.join(self.args)\n if self.position:\n msg = '%s at line %s column %s' % (msg, self.position[0], self.position[1])\n if self.name:\n msg += ' in %s' % self.name\n return msg", + "docstring": "Exception raised while parsing a 
template", + "type": "class", + "file_path": "numpy\\numpy\\_build_utils\\tempita\\_tempita.py", + "ast_data": "ClassDef name:TemplateError FunctionDef name:__init__ arg:self arg:message arg:position arg:name arguments arg arg arg arg Call Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Call If Assign If Return return:yes" + }, + { + "library": "tensorflow", + "name": "Symbol", + "source_code": "class Symbol(Tensor):\n pass", + "docstring": "Symbolic \"graph\" Tensor. These objects represent the output of an op definition and do not carry a value.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\core.py", + "ast_data": "ClassDef name:Symbol" + }, + { + "library": "tensorflow", + "name": "experimental_local_results", + "source_code": "def experimental_local_results(self, value):\n return self._extended._local_results(value)", + "docstring": "Returns the list of all local per-replica values contained in . Note: This only returns values on the worker initiated by this client. When using a like , each worker will be its own client, and this function will only return values computed on that worker. Args: value: A value returned by , scopevaluevalue(value,).`", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:experimental_local_results arg:self arg:value arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "_get_to_python", + "source_code": "def _get_to_python(self, field):\n while field.remote_field is not None:\n field = field.remote_field.get_related_field()\n return field.to_python", + "docstring": "If the field is a related field, fetch the concrete field's (that is, the ultimate pointed-to field's) to_python.", + "type": "method", + "file_path": "django\\django\\forms\\models.py", + "ast_data": "FunctionDef name:_get_to_python arg:self arg:field arguments arg arg While Compare Assign Call Return return:yes" + }, + { + "library": "django", + "name": "get_slug_field", + "source_code": "def get_slug_field(self):\n return self.slug_field", + "docstring": "Get the name of a slug field to be used to look up by slug.", + "type": "method", + "file_path": "django\\django\\views\\generic\\detail.py", + "ast_data": "FunctionDef name:get_slug_field arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "_get_empty_dtype", + "source_code": "def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:\n if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]):\n empty_dtype = join_units[0].block.dtype\n return empty_dtype\n has_none_blocks = any((unit.block.dtype.kind == 'V' for unit in join_units))\n dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]\n dtype = find_common_type(dtypes)\n if has_none_blocks:\n dtype = ensure_dtype_can_hold_na(dtype)\n return dtype", + "docstring": "Return dtype and N/A values to use when concatenating specified units. Returned N/A value may be None which means there was no casting involved. 
Returns ------- dtype", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\concat.py", + "ast_data": "FunctionDef name:_get_empty_dtype arg:join_units arguments arg If Call Assign Return return:yes Assign Call Compare Assign Assign Call If Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "_cleanup", + "source_code": "def _cleanup(self) -> None:\n for tmp_dir in self._tmp_dirs:\n with contextlib.suppress(Exception):\n shutil.rmtree(tmp_dir)", + "docstring": "Remove temporary directories.", + "type": "method", + "file_path": "sphinx\\sphinx\\theming.py", + "ast_data": "FunctionDef name:_cleanup arg:self arguments arg For With Call Call" + }, + { + "library": "django", + "name": "Form", + "source_code": "class Form(BaseForm, metaclass=DeclarativeFieldsMetaclass):\n pass", + "docstring": "A collection of Fields, plus their associated data.", + "type": "class", + "file_path": "django\\django\\forms\\forms.py", + "ast_data": "ClassDef name:Form" + }, + { + "library": "tensorflow", + "name": "predict_classes", + "source_code": "def predict_classes(self, x, batch_size=32, verbose=0):\n warnings.warn('`model.predict_classes()` is deprecated and will be removed after 2021-01-01. Please use instead:* `np.argmax(model.predict(x), axis=-1)`, if your model does multi-class classification (e.g. if it uses a `softmax` last-layer activation).* `(model.predict(x) > 0.5).astype(\"int32\")`, if your model does binary classification (e.g. if it uses a `sigmoid` last-layer activation).')\n proba = self.predict(x, batch_size=batch_size, verbose=verbose)\n if proba.shape[-1] > 1:\n return proba.argmax(axis=-1)\n else:\n return (proba > 0.5).astype('int32')", + "docstring": "Generate class predictions for the input samples. The input samples are processed batch by batch. Args: x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs). batch_size: integer. verbose: verbosity mode, 0 or 1. Returns: A numpy array of class predictions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\sequential.py", + "ast_data": "FunctionDef name:predict_classes arg:self arg:x arg:batch_size arg:verbose arguments arg arg arg arg Call Assign Call If Compare Return return:yes Call Return return:yes Call Compare" + }, + { + "library": "tensorflow", + "name": "ShapeEquals", + "source_code": "def ShapeEquals(tensor_proto, shape):\n if not isinstance(tensor_proto, tensor_pb2.TensorProto):\n raise TypeError(f'`tensor_proto` must be a tensor_pb2.TensorProto object, but got type {type(tensor_proto)}.')\n if isinstance(shape, tensor_shape_pb2.TensorShapeProto):\n shape = [d.size for d in shape.dim]\n elif not isinstance(shape, (list, tuple)):\n raise TypeError(f'`shape` must be a list or tuple, but got type {type(shape)}.')\n tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]\n return all((x == y for x, y in zip(tensor_shape_list, shape)))", + "docstring": "Returns True if \"tensor_proto\" has the given \"shape\". Args: tensor_proto: A TensorProto. shape: A tensor shape, expressed as a TensorShape, list, or tuple. Returns: True if \"tensor_proto\" has the given \"shape\", otherwise False. 
Raises: TypeError: If \"tensor_proto\" is not a TensorProto, or shape is not a TensorShape, list, or tuple.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_util.py", + "ast_data": "FunctionDef name:ShapeEquals arg:tensor_proto arg:shape arguments arg arg If Call Raise Call Call If Call Assign If Call Raise Call Call Assign Return return:yes Call Compare Call" + }, + { + "library": "pandas", + "name": "find_valid_index", + "source_code": "def find_valid_index(how: str, is_valid: npt.NDArray[np.bool_]) -> int | None:\n assert how in ['first', 'last']\n if len(is_valid) == 0:\n return None\n if is_valid.ndim == 2:\n is_valid = is_valid.any(axis=1)\n if how == 'first':\n idxpos = is_valid[:].argmax()\n elif how == 'last':\n idxpos = len(is_valid) - 1 - is_valid[::-1].argmax()\n chk_notna = is_valid[idxpos]\n if not chk_notna:\n return None\n return idxpos", + "docstring": "Retrieves the positional index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. is_valid: np.ndarray Mask to find na_values. Returns ------- int or None", + "type": "function", + "file_path": "pandas\\pandas\\core\\missing.py", + "ast_data": "FunctionDef name:find_valid_index arg:how arg:is_valid arguments arg arg Compare If Compare Call Return return:no If Compare Assign Call If Compare Assign Call If Compare Assign Call Call Assign If Return return:no Return return:yes" + }, + { + "library": "tensorflow", + "name": "tensor_list", + "source_code": "def tensor_list(elements, element_dtype=None, element_shape=None, use_tensor_array=False):\n _validate_list_constructor(elements, element_dtype, element_shape)\n if use_tensor_array:\n return data_structures.tf_tensor_array_new(elements, element_dtype, element_shape)\n else:\n return data_structures.tf_tensor_list_new(elements, element_dtype, element_shape)", + "docstring": "Creates an tensor list and populates it with the given elements. This function provides a more uniform access to tensor lists and tensor arrays, and allows optional initialization. Note: this function is a simplified wrapper. If you need greater control, it is recommended to use the underlying implementation directly. Args: elements: Iterable[tf.Tensor, ...], the elements to initially fill the list with element_dtype: Optional[tf.DType], data type for the elements in the list; required if the list is empty element_shape: Optional[tf.TensorShape], shape for the elements in the list; required if the list is empty use_tensor_array: bool, whether to use the more compatible but restrictive tf.TensorArray implementation Returns: Union[tf.Tensor, tf.TensorArray], the new list. 
Raises: ValueError: for invalid arguments", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\lang\\special_functions.py", + "ast_data": "FunctionDef name:tensor_list arg:elements arg:element_dtype arg:element_shape arg:use_tensor_array arguments arg arg arg arg Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "power_divergence", + "source_code": "@xp_capabilities(jax_jit=False, allow_dask_compute=True)\n@_axis_nan_policy_factory(Power_divergenceResult, paired=True, n_samples=_pd_nsamples, too_small=-1)\ndef power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):\n return _power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_=lambda_)", + "docstring": "Cressie-Read power divergence statistic and goodness of fit test. This function tests the null hypothesis that the categorical data has the given frequencies, using the Cressie-Read power divergence statistic. Parameters ---------- f_obs : array_like Observed frequencies in each category. f_exp : array_like, optional Expected frequencies in each category. By default the categories are assumed to be equally likely. ddof : int, optional \"Delta degrees of freedom\": adjustment to the degrees of freedom for the p-value. The p-value is computed using a chi-squared distribution with `kddoff_obsf_expf_obslambda_chisquareaxis and are 1-D. pvalue : float or ndarray The p-value of the test. The value is a float if and the return value are scalars. See Also -------- chisquare Notes ----- This test is invalid when the observed or expected frequencies in each category are too small. A typical rule is that all of the observed and expected frequencies should be at least 5. Also, the sum of the observed and expected frequencies must be the same for the test to be valid; raises an error if the sums do not agree within a relative tolerance of `lambda_f_obsf_obsf_explambda_chisquaref_obsf_expf_obsddofddoff_obsf_expf_obsf_expf_obsf_exp`: >>> power_divergence([16, 18, 16, 14, 12, 12], ... f_exp=[[16, 16, 16, 16, 16, 8], ... [8, 20, 20, 16, 12, 12]], ... axis=1) (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:power_divergence arg:f_obs arg:f_exp arg:ddof arg:axis arg:lambda_ arguments arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "cryptography", + "name": "__copy__", + "source_code": "@abc.abstractmethod\ndef __copy__(self) -> DHPrivateKey:\n pass", + "docstring": "Returns a copy.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py", + "ast_data": "FunctionDef name:__copy__ arg:self arguments arg" + }, + { + "library": "pandas", + "name": "_merge_with_dialect_properties", + "source_code": "def _merge_with_dialect_properties(dialect: csv.Dialect, defaults: dict[str, Any]) -> dict[str, Any]:\n kwds = defaults.copy()\n for param in MANDATORY_DIALECT_ATTRS:\n dialect_val = getattr(dialect, param)\n parser_default = parser_defaults[param]\n provided = kwds.get(param, parser_default)\n conflict_msgs = []\n if provided not in (parser_default, dialect_val):\n msg = f\"Conflicting values for '{param}': '{provided}' was provided, but the dialect specifies '{dialect_val}'. 
Using the dialect-specified value.\"\n if not (param == 'delimiter' and kwds.pop('sep_override', False)):\n conflict_msgs.append(msg)\n if conflict_msgs:\n warnings.warn('\\n\\n'.join(conflict_msgs), ParserWarning, stacklevel=find_stack_level())\n kwds[param] = dialect_val\n return kwds", + "docstring": "Merge default kwargs in TextFileReader with dialect parameters. Parameters ---------- dialect : csv.Dialect Concrete csv dialect. See csv.Dialect documentation for more details. defaults : dict Keyword arguments passed to TextFileReader. Returns ------- kwds : dict Updated keyword arguments, merged with dialect parameters.", + "type": "function", + "file_path": "pandas\\pandas\\io\\parsers\\readers.py", + "ast_data": "FunctionDef name:_merge_with_dialect_properties arg:dialect arg:defaults arguments arg arg Assign Call For Assign Call Assign Assign Call Assign If Compare Assign If BoolOp Compare Call Call If Call Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_default_initializer", + "source_code": "def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):\n del shape\n if dtype.is_floating:\n initializer = init_ops.glorot_uniform_initializer()\n initializing_from_value = False\n elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool or (dtype == dtypes.string):\n initializer = init_ops.zeros_initializer()\n initializing_from_value = False\n else:\n raise ValueError('An initializer for variable %s of %s is required' % (name, dtype.base_dtype))\n return (initializer, initializing_from_value)", + "docstring": "Provide a default initializer and a corresponding value. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. Returns: initializer and initializing_from_value. See get_variable above. Raises: ValueError: When giving unsupported dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\variable_scope_shim.py", + "ast_data": "FunctionDef name:_get_default_initializer arg:self arg:name arg:shape arg:dtype arguments arg arg arg arg If Assign Call Assign If BoolOp Compare Assign Call Assign Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_sum_rightmost", + "source_code": "def _sum_rightmost(value, dim):\n if dim == 0:\n return value\n required_shape = value.shape[:-dim] + (-1,)\n return value.reshape(required_shape).sum(-1)", + "docstring": "Sum out ``. 
dim (int): The number of rightmost dims to sum out.", + "type": "function", + "file_path": "pytorch\\torch\\distributions\\utils.py", + "ast_data": "FunctionDef name:_sum_rightmost arg:value arg:dim arguments arg arg If Compare Return return:yes Assign Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "YearLocator", + "source_code": "class YearLocator(RRuleLocator):\n\n def __init__(self, base=1, month=1, day=1, tz=None):\n rule = rrulewrapper(YEARLY, interval=base, bymonth=month, bymonthday=day, **self.hms0d)\n super().__init__(rule, tz=tz)\n self.base = ticker._Edge_integer(base, 0)\n\n def _create_rrule(self, vmin, vmax):\n ymin = max(self.base.le(vmin.year) * self.base.step, 1)\n ymax = min(self.base.ge(vmax.year) * self.base.step, 9999)\n c = self.rule._construct\n replace = {'year': ymin, 'month': c.get('bymonth', 1), 'day': c.get('bymonthday', 1), 'hour': 0, 'minute': 0, 'second': 0}\n start = vmin.replace(**replace)\n stop = start.replace(year=ymax)\n self.rule.set(dtstart=start, until=stop)\n return (start, stop)", + "docstring": "Make ticks on a given day of each year that is a multiple of base. Examples:: # Tick every year on Jan 1st locator = YearLocator() # Tick every 5 years on July 4th locator = YearLocator(5, month=7, day=4)", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\dates.py", + "ast_data": "ClassDef name:YearLocator FunctionDef name:__init__ arg:self arg:base arg:month arg:day arg:tz arguments arg arg arg arg arg Assign Call Call Call Assign Call FunctionDef name:_create_rrule arg:self arg:vmin arg:vmax arguments arg arg arg Assign Call Call Assign Call Call Assign Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "path_of_module", + "source_code": "@compatibility(is_backward_compatible=True)\ndef path_of_module(self, mod: torch.nn.Module) -> str:\n if self.submodule_paths:\n path = self.submodule_paths.get(mod)\n if path is None:\n raise NameError('module is not installed as a submodule')\n assert isinstance(path, str)\n return path\n else:\n for n, p in self.root.named_modules():\n if mod is p:\n return n\n raise NameError('module is not installed as a submodule')", + "docstring": "Helper method to find the qualified name of `` to retrieve the qualified name for.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", + "ast_data": "FunctionDef name:path_of_module arg:self arg:mod arguments arg arg If Assign Call If Compare Raise Call Call Return return:yes For Call If Compare Return return:yes Raise Call Call" + }, + { + "library": "pytorch", + "name": "pre_load_state_dict_hook", + "source_code": "def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n for submodule_name, submodule in module.named_modules():\n for attr_name in submodule.__dict__.keys():\n mod_prefix = prefix + submodule_name\n key = mod_prefix + ('.' 
if mod_prefix else '') + attr_name\n if key in state_dict:\n if isinstance(state_dict[key], ShardedTensor):\n setattr(submodule, attr_name, state_dict[key])", + "docstring": "Pre-load state dict hook to add ShardedTensor to the module.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py", + "ast_data": "FunctionDef name:pre_load_state_dict_hook arg:module arg:state_dict arg:prefix arg:local_metadata arg:strict arg:missing_keys arg:unexpected_keys arg:error_msgs arguments arg arg arg arg arg arg arg arg For Call For Call Assign Assign If Compare If Call Call" + }, + { + "library": "tensorflow", + "name": "_identity", + "source_code": "def _identity(x):\n if isinstance(x, tensor_array_ops.TensorArray):\n return x.identity()\n elif isinstance(x, ops.Operation):\n return control_flow_ops.group(x)\n elif context.executing_eagerly() and x is None:\n return None\n else:\n return array_ops.identity(x)", + "docstring": "Identity op that recognizes , , and .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\critical_section_ops.py", + "ast_data": "FunctionDef name:_identity arg:x arguments arg If Call Return return:yes Call If Call Return return:yes Call If BoolOp Call Compare Return return:no Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_sparse_values_to_keep", + "source_code": "def _sparse_values_to_keep(t, keep_input):\n row_values = t.indices[:, 0]\n return array_ops.gather(keep_input, row_values)", + "docstring": "Convert a per-row vector to a per-value one.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:_sparse_values_to_keep arg:t arg:keep_input arguments arg arg Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_check_rnn_cell_input_dtypes", + "source_code": "def _check_rnn_cell_input_dtypes(inputs):\n for t in nest.flatten(inputs):\n _check_supported_dtypes(t.dtype)", + "docstring": "Check whether the input tensors are with supported dtypes. Default RNN cells only support floats and complex as its dtypes since the activation function (tanh and sigmoid) only allow those types. This function will throw a proper error message if the inputs is not in a supported type. Args: inputs: tensor or nested structure of tensors that are feed to RNN cell as input or state. Raises: ValueError: if any of the input tensor are not having dtypes of float or complex.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", + "ast_data": "FunctionDef name:_check_rnn_cell_input_dtypes arg:inputs arguments arg For Call Call" + }, + { + "library": "matplotlib", + "name": "backend_for_gui_framework", + "source_code": "def backend_for_gui_framework(self, framework):\n return self._GUI_FRAMEWORK_TO_BACKEND.get(framework.lower())", + "docstring": "Return the name of the backend corresponding to the specified GUI framework. Parameters ---------- framework : str GUI framework such as \"qt\". 
Returns ------- str or None Backend name or None if GUI framework not recognised.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py", + "ast_data": "FunctionDef name:backend_for_gui_framework arg:self arg:framework arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_make_device_specs", + "source_code": "def _make_device_specs(devices: Optional[List[Union[tf_device.DeviceSpec, str]]]=None, device_type: Optional[str]=None) -> Tuple[List[tf_device.DeviceSpec], str]:\n if devices is None:\n if device_type is None:\n device_type = 'CPU'\n devices = config.local_devices(device_type)\n else:\n if isinstance(devices[0], str):\n devices = [tf_device.DeviceSpec.from_string(d) for d in devices]\n if device_type is None:\n device_type = devices[0].device_type\n if device_type.upper() != devices[0].device_type.upper():\n raise ValueError(f'Conflicting devices {str(devices)} and device_type {device_type}')\n return (devices, device_type)", + "docstring": "Makes device specs for all local devices or from a provided list.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\mesh_util.py", + "ast_data": "FunctionDef name:_make_device_specs arg:devices arg:device_type arguments arg arg If Compare If Compare Assign Assign Call If Call Assign Call If Compare Assign If Compare Call Call Raise Call Call Return return:yes" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, dr_input):\n if isinstance(dr_input, str):\n self.ensure_registered()\n if dr_input.lower() in self._alias:\n name = self._alias[dr_input.lower()]\n else:\n name = dr_input\n driver = c_void_p(capi.get_driver_by_name(force_bytes(name)))\n elif isinstance(dr_input, int):\n self.ensure_registered()\n driver = capi.get_driver(dr_input)\n elif isinstance(dr_input, c_void_p):\n driver = dr_input\n else:\n raise GDALException('Unrecognized input type for GDAL/OGR Driver: %s' % type(dr_input))\n if not driver:\n raise GDALException('Could not initialize GDAL/OGR Driver on input: %s' % dr_input)\n self.ptr = driver", + "docstring": "Initialize an GDAL/OGR driver on either a string or integer input.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\driver.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dr_input arguments arg arg If Call Call If Compare Call Assign Call Assign Assign Call Call Call If Call Call Assign Call If Call Assign Raise Call Call If Raise Call Assign" + }, + { + "library": "scipy", + "name": "_compute_absolute_step", + "source_code": "def _compute_absolute_step(rel_step, x0, f0, method):\n sign_x0 = (x0 >= 0).astype(float) * 2 - 1\n rstep = _eps_for_method(x0.dtype, f0.dtype, method)\n if rel_step is None:\n abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0))\n else:\n abs_step = rel_step * sign_x0 * np.abs(x0)\n dx = x0 + abs_step - x0\n abs_step = np.where(dx == 0, rstep * sign_x0 * np.maximum(1.0, np.abs(x0)), abs_step)\n return abs_step", + "docstring": "Computes an absolute step from a relative step for finite difference calculation. Parameters ---------- rel_step: None or array-like Relative step for the finite difference calculation x0 : np.ndarray Parameter vector f0 : np.ndarray or scalar method : {'2-point', '3-point', 'cs'} Returns ------- h : float The absolute step size Notes ----- will always be np.float64. However, if or are smaller floating point dtypes (e.g. 
np.float32), then the absolute step size will be calculated from the smallest floating point size.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_numdiff.py", + "ast_data": "FunctionDef name:_compute_absolute_step arg:rel_step arg:x0 arg:f0 arg:method arguments arg arg arg arg Assign Call Compare Assign Call If Compare Assign Call Call Assign Call Assign Assign Call Compare Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "argsort", + "source_code": "def argsort(self, axis=-1, kind=None, order=None):\n return self.__array__().argsort(axis, kind, order)", + "docstring": "Return the indices that sort the array lexicographically. For full documentation see , for which this method is in fact merely a \"thin wrapper.\" Examples -------- >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') >>> c = c.view(np.char.chararray); c chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], dtype='|S5') >>> c[c.argsort()] chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], dtype='|S5')", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:argsort arg:self arg:axis arg:kind arg:order arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "validate_request_object_encryption_alg", + "source_code": "def validate_request_object_encryption_alg(self):\n self._validate_claim_value('request_object_encryption_alg')", + "docstring": "JWE [JWE] alg algorithm [JWA] the RP is declaring that it may use for encrypting Request Objects sent to the OP. This parameter SHOULD be included when symmetric encryption will be used, since this signals to the OP that a client_secret value needs to be returned from which the symmetric key will be derived, that might not otherwise be returned. The RP MAY still use other supported encryption algorithms or send unencrypted Request Objects, even when this parameter is present. If both signing and encryption are requested, the Request Object will be signed then encrypted, with the result being a Nested JWT, as defined in [JWT]. The default, if omitted, is that the RP is not declaring whether it might encrypt any Request Objects.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", + "ast_data": "FunctionDef name:validate_request_object_encryption_alg arg:self arguments arg Call" + }, + { + "library": "scipy", + "name": "random_base2", + "source_code": "def random_base2(self, m: IntNumber) -> np.ndarray:\n n = 2 ** m\n total_n = self.num_generated + n\n if not total_n & total_n - 1 == 0:\n raise ValueError(f\"The balance properties of Sobol' points require n to be a power of 2. {self.num_generated} points have been previously generated, then: n={self.num_generated}+2**{m}={total_n}. If you still want to do this, the function 'Sobol.random()' can be used.\")\n return self.random(n)", + "docstring": "Draw point(s) from the Sobol' sequence. This function draws :math: points in the parameter space ensuring the balance properties of the sequence. Parameters ---------- m : int Logarithm in base 2 of the number of samples; i.e., n = 2^m. 
Returns ------- sample : array_like (n, d) Sobol' sample.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:random_base2 arg:self arg:m arguments arg arg Assign Assign If Compare Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "DefaultState", + "source_code": "class DefaultState:\n __slots__ = ['process_group', 'world_size', 'gradient_predivide_factor', 'gradient_postdivide_factor']\n\n def __init__(self, process_group: dist.ProcessGroup):\n if process_group is None:\n raise ValueError(f'Expected to pass in an explicit ProcessGroup to {self}.')\n self.process_group = process_group\n self.world_size = dist.get_world_size(process_group)\n self.gradient_predivide_factor = self._get_gradient_predivide_factor(self.world_size)\n self.gradient_postdivide_factor = self.world_size / self.gradient_predivide_factor\n\n @staticmethod\n def _get_gradient_predivide_factor(world_size: int) -> float:\n factor: int = 1\n while world_size % factor == 0 and world_size / factor > factor:\n factor *= 2\n return float(factor)", + "docstring": "Stores state needed to perform the default communication algorithm within a communication hook. Args: process_group (ProcessGroup): The process group to be used.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\algorithms\\_comm_hooks\\default_hooks.py", + "ast_data": "ClassDef name:DefaultState Assign FunctionDef name:__init__ arg:self arg:process_group arguments arg arg If Compare Raise Call Assign Assign Call Assign Call Assign FunctionDef name:_get_gradient_predivide_factor arg:world_size arguments arg While BoolOp Compare Compare Return return:yes Call" + }, + { + "library": "django", + "name": "language", + "source_code": "def language(self):\n return self.__language", + "docstring": "Return the translation language.", + "type": "method", + "file_path": "django\\django\\utils\\translation\\trans_real.py", + "ast_data": "FunctionDef name:language arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "PlanePlot", + "source_code": "class PlanePlot(MPLPlot, ABC):\n _layout_type = 'single'\n\n def __init__(self, data, x, y, **kwargs) -> None:\n MPLPlot.__init__(self, data, **kwargs)\n if x is None or y is None:\n raise ValueError(self._kind + ' requires an x and y column')\n if is_integer(x) and (not holds_integer(self.data.columns)):\n x = self.data.columns[x]\n if is_integer(y) and (not holds_integer(self.data.columns)):\n y = self.data.columns[y]\n self.x = x\n self.y = y\n\n @final\n def _get_nseries(self, data: Series | DataFrame) -> int:\n return 1\n\n @final\n def _post_plot_logic(self, ax: Axes, data) -> None:\n x, y = (self.x, self.y)\n xlabel = self.xlabel if self.xlabel is not None else pprint_thing(x)\n ylabel = self.ylabel if self.ylabel is not None else pprint_thing(y)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n @final\n def _plot_colorbar(self, ax: Axes, *, fig: Figure, **kwds):\n img = ax.collections[-1]\n return fig.colorbar(img, ax=ax, **kwds)", + "docstring": "Abstract class for plotting on plane, currently scatter and hexbin.", + "type": "class", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py", + "ast_data": "ClassDef name:PlanePlot Assign FunctionDef name:__init__ arg:self arg:data arg:x arg:y arguments arg arg arg arg arg Call If BoolOp Compare Compare Raise Call If BoolOp Call Call Assign If BoolOp Call Call Assign Assign Assign FunctionDef name:_get_nseries arg:self arg:data arguments arg arg 
Return return:yes FunctionDef name:_post_plot_logic arg:self arg:ax arg:data arguments arg arg arg Assign Assign Compare Call Assign Compare Call Call Call FunctionDef name:_plot_colorbar arg:self arg:ax arguments arg arg arg arg Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "apply_gradients", + "source_code": "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n if distribute_lib.in_cross_replica_context():\n raise ValueError('apply_gradients() must be called in a replica context.')\n if not self._doing_dynamic_loss_scaling():\n return self._optimizer.apply_gradients(grads_and_vars, global_step, name)\n replica_context = distribute_lib.get_replica_context()\n grads_and_vars = tuple(grads_and_vars)\n return replica_context.merge_call(self._distributed_apply, args=(grads_and_vars, global_step, name))", + "docstring": "Apply gradients to variables. This is the second part of . It returns an that conditionally applies gradients if all gradient values are finite. Otherwise no update is performed (nor is incremented). Args: grads_and_vars: List of (gradient, variable) pairs as returned by . global_step: Optional to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the constructor. Returns: An that conditionally applies the specified gradients. If was not None, that operation also increments . Raises: RuntimeError: If you should use instead.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:apply_gradients arg:self arg:grads_and_vars arg:global_step arg:name arguments arg arg arg arg If Call Raise Call If Call Return return:yes Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "QuantizationSpec", + "source_code": "@dataclass(eq=True, frozen=True)\nclass QuantizationSpec(QuantizationSpecBase):\n dtype: torch.dtype\n observer_or_fake_quant_ctr: _ObserverOrFakeQuantizeConstructor\n quant_min: Optional[int] = None\n quant_max: Optional[int] = None\n qscheme: Optional[torch.qscheme] = None\n ch_axis: Optional[int] = None\n is_dynamic: bool = False\n\n def __post_init__(self):\n if self.quant_min is not None and self.quant_max is not None and (self.quant_min > self.quant_max):\n raise ValueError(f'quant_min {self.quant_min} must be <= quant_max {self.quant_max}.')\n if self.ch_axis is not None and self.ch_axis < 0:\n raise ValueError('Ch_axis is < 0.')", + "docstring": "Quantization spec for common operators that allows user to specify how to quantize a Tensor, this includes dtype, quant_min, quant_max etc.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\quantizer.py", + "ast_data": "ClassDef name:QuantizationSpec FunctionDef name:__post_init__ arg:self arguments arg If BoolOp Compare Compare Compare Raise Call If BoolOp Compare Compare Raise Call Call" + }, + { + "library": "kornia", + "name": "_PostInitInjectionMetaClass", + "source_code": "class _PostInitInjectionMetaClass(type):\n\n def __call__(cls: Type[T], *args: Any, **kwargs: Any) -> T:\n obj = type.__call__(cls, *args, **kwargs)\n obj.__post_init__()\n return obj", + "docstring": "To inject the `` function after the creation of each instance.", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\random_generator\\base.py", + "ast_data": "ClassDef name:_PostInitInjectionMetaClass FunctionDef name:__call__ arg:cls 
arguments arg arg arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_less_equal_flops", + "source_code": "@ops.RegisterStatistics('LessEqual', 'flops')\ndef _less_equal_flops(graph, node):\n return _binary_per_element_op_flops(graph, node)", + "docstring": "Compute flops for LessEqual operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_less_equal_flops arg:graph arg:node arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "split_hostlist", + "source_code": "def split_hostlist(hostlist):\n in_brackets = False\n cur_host = ''\n for c in hostlist:\n if in_brackets:\n assert c != '['\n if c == ']':\n in_brackets = False\n elif c == '[':\n in_brackets = True\n elif c == ',':\n assert cur_host != ''\n yield cur_host\n cur_host = ''\n continue\n cur_host += c\n if cur_host:\n yield cur_host", + "docstring": "Split hostlist at commas outside of range expressions ('[3-5]').", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py", + "ast_data": "FunctionDef name:split_hostlist arg:hostlist arguments arg Assign Assign For If Compare If Compare Assign If Compare Assign If Compare Compare Assign If" + }, + { + "library": "tensorflow", + "name": "_FakeServer", + "source_code": "class _FakeServer(object):\n\n def start(self):\n logging.info('Creating a remote session to start a TensorFlow server, target = %r, session_config=%r', target, session_config)\n session.Session(target=target, config=session_config)\n\n def join(self):\n while True:\n time.sleep(5)", + "docstring": "A fake server that runs a master session.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py", + "ast_data": "ClassDef name:_FakeServer FunctionDef name:start arg:self arguments arg Call Call FunctionDef name:join arg:self arguments arg While Call" + }, + { + "library": "kornia", + "name": "r", + "source_code": "@property\ndef r(self) -> So2:\n return self._rotation", + "docstring": "Return the underlying .", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", + "ast_data": "FunctionDef name:r arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "safe", + "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef safe(value):\n return mark_safe(value)", + "docstring": "Mark the value as a string that should not be auto-escaped.", + "type": "function", + "file_path": "django\\django\\template\\defaultfilters.py", + "ast_data": "FunctionDef name:safe arg:value arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "construct_tensor_variable", + "source_code": "def construct_tensor_variable(target_cls, tx, proxy, example_value, subclass_type, options):\n example_value = _clone_input(example_value, tx.fake_mode)\n set_example_value(proxy.node, example_value)\n tx.output.current_tracer.track_unbacked_symbols(example_value, proxy)\n specialized_props = target_cls.specialize(example_value)\n if isinstance(example_value, torch._subclasses.fake_tensor.FakeTensor) and example_value.fake_mode is tx.fake_mode:\n if subclass_type:\n tensor_type = subclass_type\n elif isinstance(example_value, torch.nn.Parameter):\n tensor_type = torch.nn.Parameter\n elif isinstance(example_value, torch.nn.Buffer):\n tensor_type = torch.nn.Buffer\n else:\n 
tensor_type = torch.Tensor\n specialized_props['class_type'] = tensor_type\n options.update(specialized_props)\n return target_cls(proxy, **options)", + "docstring": "Actually construct a tensor variable after all the pre-processing from wrapping a pre-existing or newly created tensor value.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\variables\\builder.py", + "ast_data": "FunctionDef name:construct_tensor_variable arg:target_cls arg:tx arg:proxy arg:example_value arg:subclass_type arg:options arguments arg arg arg arg arg arg Assign Call Call Call Assign Call If BoolOp Call Compare If Assign If Call Assign If Call Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "reorder_kwargs", + "source_code": "def reorder_kwargs(user_kwargs: dict[str, Any], spec: TreeSpec) -> dict[str, Any]:\n assert spec.type is tuple\n assert spec.num_children == 2\n kwargs_spec = spec.children_specs[1]\n assert kwargs_spec.type is dict\n if set(user_kwargs) != set(kwargs_spec.context):\n raise ValueError(f'Ran into a kwarg keyword mismatch: Got the following keywords {list(user_kwargs)} but expected {kwargs_spec.context}')\n reordered_kwargs = {}\n for kw in kwargs_spec.context:\n reordered_kwargs[kw] = user_kwargs[kw]\n return reordered_kwargs", + "docstring": "Reorder user-provided kwargs to match the order in . is expected to be the in_spec of an exported program, i.e. the spec that results from flattening . We need this to provide consistent input ordering, such so that users can pass in foo(a=a, b=b) OR foo(b=b, a=a) and receive the same result.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_tree_utils.py", + "ast_data": "FunctionDef name:reorder_kwargs arg:user_kwargs arg:spec arguments arg arg Compare Compare Assign Compare If Compare Call Call Raise Call Call Assign For Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_sketch_params", + "source_code": "def get_sketch_params(self):\n return self._sketch", + "docstring": "Return the sketch parameters for the artist. Returns ------- tuple or None A 3-tuple with the following elements: - *scale*: The amplitude of the wiggle perpendicular to the source line. - *length*: The length of the wiggle along the line. - *randomness*: The scale factor by which the length is shrunken or expanded. 
Returns *None* if no sketch parameters were set.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:get_sketch_params arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "InsertVar", + "source_code": "class InsertVar:\n types = {'AutoField': int, 'BigAutoField': int, 'SmallAutoField': int, 'IntegerField': int, 'BigIntegerField': int, 'SmallIntegerField': int, 'PositiveBigIntegerField': int, 'PositiveSmallIntegerField': int, 'PositiveIntegerField': int, 'BooleanField': int, 'FloatField': Database.DB_TYPE_BINARY_DOUBLE, 'DateTimeField': Database.DB_TYPE_TIMESTAMP, 'DateField': Database.Date, 'DecimalField': decimal.Decimal}\n\n def __init__(self, field):\n internal_type = getattr(field, 'target_field', field).get_internal_type()\n self.db_type = self.types.get(internal_type, str)\n self.bound_param = None\n\n def bind_parameter(self, cursor):\n self.bound_param = cursor.cursor.var(self.db_type)\n return self.bound_param\n\n def get_value(self):\n return self.bound_param.getvalue()", + "docstring": "A late-binding cursor variable that can be passed to Cursor.execute as a parameter, in order to receive the id of the row created by an insert statement.", + "type": "class", + "file_path": "django\\django\\db\\backends\\oracle\\utils.py", + "ast_data": "ClassDef name:InsertVar Assign FunctionDef name:__init__ arg:self arg:field arguments arg arg Assign Call Call Assign Call Assign FunctionDef name:bind_parameter arg:self arg:cursor arguments arg arg Assign Call Return return:yes FunctionDef name:get_value arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "cluster_spec", + "source_code": "@abc.abstractmethod\ndef cluster_spec(self):\n raise NotImplementedError()", + "docstring": "Retrieve the current state of the cluster and return a . Returns: A representing the state of the cluster at the moment this function is called. Implementors of this function must take care in ensuring that the ClusterSpec returned is up-to-date at the time of calling this function. 
This usually means retrieving the information from the underlying cluster management system every time this function is invoked and reconstructing a cluster_spec, rather than attempting to cache anything.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py", + "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Raise Call" + }, + { + "library": "scrapy", + "name": "rel_has_nofollow", + "source_code": "def rel_has_nofollow(rel: str | None) -> bool:\n return rel is not None and 'nofollow' in rel.replace(',', ' ').split()", + "docstring": "Return True if link rel attribute has nofollow type", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\misc.py", + "ast_data": "FunctionDef name:rel_has_nofollow arg:rel arguments arg Return return:yes BoolOp Compare Compare Call Call" + }, + { + "library": "scikit-learn", + "name": "_get_loss", + "source_code": "@abstractmethod\ndef _get_loss(self, sample_weight):\n pass", + "docstring": "Get loss object from sklearn._loss.loss.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:_get_loss arg:self arg:sample_weight arguments arg arg" + }, + { + "library": "pytorch", + "name": "forward", + "source_code": "def forward(self, *args: Any) -> Any:\n nested_tensors = _map_tensor_data(self._nested_input)\n result = self.forward_extended(*nested_tensors)\n del self._nested_input\n self._nested_output = result\n return tuple(_iter_tensors(result))", + "docstring": "Shared forward utility.", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\function.py", + "ast_data": "FunctionDef name:forward arg:self arguments arg arg Assign Call Assign Call Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_insert_obs_or_fq", + "source_code": "def _insert_obs_or_fq(node: Node, obs_or_fq: ObserverOrFakeQuantize, model: torch.nn.Module, named_modules: dict[str, torch.nn.Module], graph: Graph) -> Node:\n model_device = assert_and_get_unique_device(model)\n if model_device:\n obs_or_fq.to(model_device)\n if is_equalization_observer(obs_or_fq):\n prefix = node.name + '_equalization_process_'\n else:\n prefix = 'activation_post_process_'\n get_new_obs_or_fq_name = get_new_attr_name_with_prefix(prefix)\n obs_or_fq_name = get_new_obs_or_fq_name(model)\n setattr(model, obs_or_fq_name, obs_or_fq)\n named_modules[obs_or_fq_name] = obs_or_fq\n with graph.inserting_after(node):\n new_obs = graph.create_node('call_module', obs_or_fq_name, (node,), {})\n return new_obs", + "docstring": "Attaches to , and creates a node which calls on the output of . obs_or_fq: an instance of Observer or FakeQuantize module", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py", + "ast_data": "FunctionDef name:_insert_obs_or_fq arg:node arg:obs_or_fq arg:model arg:named_modules arg:graph arguments arg arg arg arg arg Assign Call If Call If Call Assign Assign Assign Call Assign Call Call Assign With Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "save_counter", + "source_code": "@property\ndef save_counter(self):\n self._maybe_create_save_counter()\n return self._save_counter", + "docstring": "An integer variable which starts at zero and is incremented on save. Used to number checkpoints. 
Returns: The save counter variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:save_counter arg:self arguments arg Call Return return:yes" + }, + { + "library": "numpy", + "name": "__setitem__", + "source_code": "def __setitem__(self, indx, value):\n ma.MaskedArray.__setitem__(self, indx, value)\n if isinstance(indx, str):\n self._mask[indx] = ma.getmaskarray(value)", + "docstring": "Sets the given record to value.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\mrecords.py", + "ast_data": "FunctionDef name:__setitem__ arg:self arg:indx arg:value arguments arg arg arg Call If Call Assign Call" + }, + { + "library": "tensorflow", + "name": "assign_sub", + "source_code": "def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n raise NotImplementedError", + "docstring": "Subtracts a value from this variable. This is essentially a shortcut for . Args: delta: A . The value to subtract from this variable. use_locking: If , use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: The updated variable. If is false, instead returns None in Eager mode and the assign op in graph mode.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:assign_sub arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Raise" + }, + { + "library": "tensorflow", + "name": "_convert_model_from_bytearray_to_object", + "source_code": "def _convert_model_from_bytearray_to_object(model_bytearray):\n model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)\n model_object = schema_fb.ModelT.InitFromObj(model_object)\n model_object = copy.deepcopy(model_object)\n return model_object", + "docstring": "Converts a tflite model from a bytearray into a parsable object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", + "ast_data": "FunctionDef name:_convert_model_from_bytearray_to_object arg:model_bytearray arguments arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_xla_sharding", + "source_code": "def get_xla_sharding(var: BaseResourceVariable) -> Any:\n return var._get_xla_sharding()", + "docstring": "Returns the XLA sharding associated with the variable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:get_xla_sharding arg:var arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "allocate_groups", + "source_code": "def allocate_groups(self):\n assert config.memory_pool in ('none', 'intermediates', 'outputs', 'combined')\n assert self.buffer_groups is not None\n for group in self.buffer_groups:\n group.make_allocation()\n outputs: list[Allocation] = []\n intermediates: list[Allocation] = []\n for group in self.buffer_groups:\n assert group.allocation\n if group.is_output and config.memory_pool != 'combined':\n outputs.append(group.allocation)\n else:\n intermediates.append(group.allocation)\n for block in sorted(outputs, key=lambda x: (x.size_hint, -len(x.live_range))):\n self.pools.allocate_output(block)\n for block in sorted(intermediates, key=lambda x: (-x.size_hint, -len(x.live_range))):\n self.pools.allocate(block)\n 
self.pools.finalize()", + "docstring": "Assign every allocation to a specific location in a specific AllocationPool.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py", + "ast_data": "FunctionDef name:allocate_groups arg:self arguments arg Compare Compare For Call For If BoolOp Compare Call Call For Call arguments arg Call Call For Call arguments arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "emit_pid", + "source_code": "def emit_pid(self, name: str, pid: int) -> None:\n event = {}\n event['name'] = 'process_name'\n event['ph'] = 'M'\n event['pid'] = pid\n event['args'] = {'name': name}\n self._metadata.append(event)", + "docstring": "Adds a process metadata event to the trace. Args: name: The process name as a string. pid: Identifier of the process as an integer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", + "ast_data": "FunctionDef name:emit_pid arg:self arg:name arg:pid arguments arg arg arg Assign Assign Assign Assign Assign Call" + }, + { + "library": "sphinx", + "name": "set_translator", + "source_code": "def set_translator(self, name: str, translator_class: type[nodes.NodeVisitor], override: bool=False) -> None:\n self.registry.add_translator(name, translator_class, override=override)", + "docstring": "Register or override a Docutils translator class. This is used to register a custom output translator or to replace a builtin translator. This allows extensions to use a custom translator and define custom nodes for the translator (see :meth:). :param name: The name of the builder for the translator :param translator_class: A translator class :param override: If true, install the translator forcedly even if another translator is already installed as the same name .. versionadded:: 1.3 .. versionchanged:: 1.8 Add *override* keyword.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:set_translator arg:self arg:name arg:translator_class arg:override arguments arg arg arg arg Call" + }, + { + "library": "pytorch", + "name": "from_float", + "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n assert type_before_parametrizations(mod) == cls._FLOAT_MODULE, ' qat.' 
+ cls.__name__ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__\n assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n assert mod.qconfig, 'Input float module must have a valid qconfig'\n if type_before_parametrizations(mod) == LinearReLU:\n mod = mod[0]\n qconfig = mod.qconfig\n qat_linear = cls(mod.in_features, mod.out_features, bias=mod.bias is not None, qconfig=qconfig)\n if is_parametrized(mod, 'weight'):\n transfer_parametrizations_and_params(mod, qat_linear, 'weight')\n else:\n qat_linear.weight = mod.weight\n if is_parametrized(mod, 'bias'):\n transfer_parametrizations_and_params(mod, qat_linear, 'bias')\n else:\n qat_linear.bias = mod.bias\n return qat_linear", + "docstring": "Create a qat module from a float module or qparams_dict Args: a float module, either produced by torch.ao.quantization utilities or directly from user", + "type": "method", + "file_path": "pytorch\\torch\\ao\\nn\\qat\\modules\\linear.py", + "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Compare Call Call If Compare Call Assign Assign Assign Call Compare If Call Call Assign If Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_State", + "source_code": "class _State(str, Enum):\n pass", + "docstring": "Base Class for defining module state to capture snapshots .", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py", + "ast_data": "ClassDef name:_State" + }, + { + "library": "tensorflow", + "name": "call", + "source_code": "@doc_controls.for_subclass_implementers\ndef call(self, inputs, **kwargs):\n return inputs", + "docstring": "This is where the layer's logic lives. Args: inputs: Input tensor, or list/tuple of input tensors. **kwargs: Additional keyword arguments. Returns: A tensor or list/tuple of tensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:call arg:self arg:inputs arguments arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_server_def", + "source_code": "def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):\n if not server_def:\n raise ValueError('server_def is None.')\n self._server_def = server_def\n if self._context_handle:\n server_def_str = server_def.SerializeToString()\n pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs, server_def_str)\n self._initialize_logical_devices()\n self._clear_caches()\n _device_parsing_cache.clear()", + "docstring": "Allow setting a server_def on the context. When a server def is replaced, it effectively clears a bunch of caches within the context. If you attempt to use a tensor object that was pointing to a tensor on the remote device, it will raise an error. Args: server_def: A tensorflow::ServerDef proto. Enables execution on remote devices. keep_alive_secs: Num. seconds after which the remote end will hang up. As long as the client is still alive, the server state for the context will be kept alive. If the client is killed (or there is some failure), the server will clean up its context keep_alive_secs after the final RPC it receives. 
Raises: ValueError: if server_def is None.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:set_server_def arg:self arg:server_def arg:keep_alive_secs arguments arg arg arg If Raise Call Assign If Assign Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "typename", + "source_code": "@set_module('numpy')\ndef typename(char):\n return _namefromtype[char]", + "docstring": "Return a description for the given data type code. Parameters ---------- char : str Data type code. Returns ------- out : str Description of the input data type code. See Also -------- dtype Examples -------- >>> import numpy as np >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] >>> for typechar in typechars: ... print(typechar, ' : ', np.typename(typechar)) ... S1 : character ? : bool B : unsigned char D : complex double precision G : complex long double precision F : complex single precision I : unsigned integer H : unsigned short L : unsigned long integer O : object Q : unsigned long long integer S : string U : unicode V : void b : signed char d : double precision g : long precision f : single precision i : integer h : short l : long integer q : long long integer", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_type_check_impl.py", + "ast_data": "FunctionDef name:typename arg:char arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "generate", + "source_code": "def generate(self, v) -> str:\n val = v.tostring(self.encoding)\n return f'({self.lhs} {self.op} {val})'", + "docstring": "create and return the op string for this TermValue", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\pytables.py", + "ast_data": "FunctionDef name:generate arg:self arg:v arguments arg arg Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "transform_keypoints", + "source_code": "def transform_keypoints(self, M: Tensor, inplace: bool=False) -> 'Keypoints3D':\n raise NotImplementedError", + "docstring": "Apply a transformation matrix to the 2D keypoints. Args: M: The transformation matrix to be applied, shape of :math: or :math:. inplace: do transform in-place and return self. Returns: The transformed keypoints.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\keypoints.py", + "ast_data": "FunctionDef name:transform_keypoints arg:self arg:M arg:inplace arguments arg arg arg Raise" + }, + { + "library": "matplotlib", + "name": "set_params", + "source_code": "def set_params(self, **kwargs):\n if 'nbins' in kwargs:\n self._nbins = kwargs.pop('nbins')\n if self._nbins != 'auto':\n self._nbins = int(self._nbins)\n if 'symmetric' in kwargs:\n self._symmetric = kwargs.pop('symmetric')\n if 'prune' in kwargs:\n prune = kwargs.pop('prune')\n _api.check_in_list(['upper', 'lower', 'both', None], prune=prune)\n self._prune = prune\n if 'min_n_ticks' in kwargs:\n self._min_n_ticks = max(1, kwargs.pop('min_n_ticks'))\n if 'steps' in kwargs:\n steps = kwargs.pop('steps')\n if steps is None:\n self._steps = np.array([1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10])\n else:\n self._steps = self._validate_steps(steps)\n self._extended_steps = self._staircase(self._steps)\n if 'integer' in kwargs:\n self._integer = kwargs.pop('integer')\n if kwargs:\n raise _api.kwarg_error('set_params', kwargs)", + "docstring": "Set parameters for this locator. 
Parameters ---------- nbins : int or 'auto', optional see steps : array-like, optional see integer : bool, optional see symmetric : bool, optional see prune : {'lower', 'upper', 'both', None}, optional see min_n_ticks : int, optional see", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:set_params arg:self arguments arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Call Assign If Compare Assign Call Call If Compare Assign Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call If Raise Call" + }, + { + "library": "scipy", + "name": "milp", + "source_code": "def milp(c, *, integrality=None, bounds=None, constraints=None, options=None):\n args_iv = _milp_iv(c, integrality, bounds, constraints, options)\n c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv\n highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u, lb, ub, integrality, options)\n res = {}\n highs_status = highs_res.get('status', None)\n highs_message = highs_res.get('message', None)\n status, message = _highs_to_scipy_status_message(highs_status, highs_message)\n res['status'] = status\n res['message'] = message\n res['success'] = status == 0\n x = highs_res.get('x', None)\n res['x'] = np.array(x) if x is not None else None\n res['fun'] = highs_res.get('fun', None)\n res['mip_node_count'] = highs_res.get('mip_node_count', None)\n res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None)\n res['mip_gap'] = highs_res.get('mip_gap', None)\n return OptimizeResult(res)", + "docstring": "Mixed-integer linear programming Solves problems of the following form: .. math:: \\min_x \\ & c^T x \\\\ \\mbox{such that} \\ & b_l \\leq A x \\leq b_u,\\\\ & l \\leq x \\leq u, \\\\ & x_i \\in \\mathbb{Z}, i \\in X_i where :math: is a vector of decision variables; :math:, :math:, :math:, :math:, and :math: are vectors; :math: is a matrix, and :math: is the set of indices of decision variables that must be integral. (In this context, a variable that can assume only integer values is said to be \"integral\"; it has an \"integrality\" constraint.) Alternatively, that's: minimize:: c @ x such that:: b_l >> import numpy as np >>> c = -np.array([0, 1]) Note the negative sign: we maximize the original objective function by minimizing the negative of the objective function. 
We collect the coefficients of the constraints into arrays like: >>> A = np.array([[-1, 1], [3, 2], [2, 3]]) >>> b_u = np.array([1, 12, 12]) >>> b_l = np.full_like(b_u, -np.inf, dtype=float) Because there is no lower limit on these constraints, we have defined a variable `scipy.optimize.linprogboundsin the tutorial `.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_milp.py", + "ast_data": "FunctionDef name:milp arg:c arguments arg arg arg arg arg Assign Call Assign Assign Call Assign Assign Call Assign Call Assign Call Assign Assign Assign Compare Assign Call Assign Compare Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "Tripod", + "source_code": "class Tripod(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n self.global_optimum = [[0.0, -50.0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n p1 = float(x[0] >= 0)\n p2 = float(x[1] >= 0)\n return p2 * (1.0 + p1) + abs(x[0] + 50.0 * p2 * (1.0 - 2.0 * p1)) + abs(x[1] + 50.0 * (1.0 - 2.0 * p2))", + "docstring": "Tripod objective function. This class defines the Tripod [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Tripod}}(x) = p(x_2) \\left[1 + p(x_1) \\right] + \\lvert x_1 + 50p(x_2) \\left[1 - 2p(x_1) \\right] \\rvert + \\lvert x_2 + 50\\left[1 - 2p(x_2)\\right] \\rvert with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py", + "ast_data": "ClassDef name:Tripod FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Compare Assign Call Compare Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "unify", + "source_code": "@dispatch(object, object, dict)\ndef unify(u, v, s):\n u = walk(u, s)\n v = walk(v, s)\n if u == v:\n return s\n if isvar(u):\n return assoc(s, u, v)\n if isvar(v):\n return assoc(s, v, u)\n return _unify(u, v, s)", + "docstring": "Find substitution so that u == v while satisfying s >>> x = var(\"x\") >>> unify((1, x), (1, 2), {}) {~x: 2}", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\core.py", + "ast_data": "FunctionDef name:unify arg:u arg:v arg:s arguments arg arg arg Assign Call Assign Call If Compare Return return:yes If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "set_dash_joinstyle", + "source_code": "@_docstring.interpd\ndef set_dash_joinstyle(self, s):\n js = JoinStyle(s)\n if self._dashjoinstyle != js:\n self.stale = True\n self._dashjoinstyle = js", + "docstring": "How to join segments of the line if it . The default joinstyle is :rc:. 
Parameters ---------- s : or %(JoinStyle)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:set_dash_joinstyle arg:self arg:s arguments arg arg Assign Call If Compare Assign Assign" + }, + { + "library": "django", + "name": "check_resolver", + "source_code": "def check_resolver(resolver):\n check_method = getattr(resolver, 'check', None)\n if check_method is not None:\n return check_method()\n elif not hasattr(resolver, 'resolve'):\n return get_warning_for_invalid_pattern(resolver)\n else:\n return []", + "docstring": "Recursively check the resolver.", + "type": "function", + "file_path": "django\\django\\core\\checks\\urls.py", + "ast_data": "FunctionDef name:check_resolver arg:resolver arguments arg Assign Call If Compare Return return:yes Call If Call Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, config, start=True):\n if config.dispatcher_address is None:\n raise ValueError('Must specify a `dispatcher_address` in the `config` passed to `WorkerServer`.')\n if isinstance(config, service_config_pb2.WorkerConfig):\n config_proto = config\n else:\n config_proto = service_config_pb2.WorkerConfig(dispatcher_address=config.dispatcher_address, worker_address=config.worker_address, port=config.port, protocol=config.protocol, heartbeat_interval_ms=config.heartbeat_interval_ms, dispatcher_timeout_ms=config.dispatcher_timeout_ms, data_transfer_protocol=config.data_transfer_protocol, data_transfer_address=config.data_transfer_address)\n self._server = _pywrap_server_lib.TF_DATA_NewWorkerServer(config_proto.SerializeToString())\n if start:\n self._server.start()", + "docstring": "Creates a new worker server. Args: config: A configuration. start: (Optional.) Boolean, indicating whether to start the server after creating it. Defaults to True.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:config arg:start arguments arg arg arg If Compare Raise Call If Call Assign Assign Call Assign Call Call If Call" + }, + { + "library": "tensorflow", + "name": "isfunction", + "source_code": "def isfunction(object):\n return _inspect.isfunction(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.isfunction.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:isfunction arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "diag_indices_from", + "source_code": "@array_function_dispatch(_diag_indices_from)\ndef diag_indices_from(arr):\n if not arr.ndim >= 2:\n raise ValueError('input array must be at least 2-d')\n if not np.all(diff(arr.shape) == 0):\n raise ValueError('All dimensions of input must be of equal length')\n return diag_indices(arr.shape[0], arr.ndim)", + "docstring": "Return the indices to access the main diagonal of an n-dimensional array. See for full details. Parameters ---------- arr : array, at least 2-D See Also -------- diag_indices Examples -------- >>> import numpy as np Create a 4 by 4 array. >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) Get the indices of the diagonal elements. 
>>> di = np.diag_indices_from(a) >>> di (array([0, 1, 2, 3]), array([0, 1, 2, 3])) >>> a[di] array([ 0, 5, 10, 15]) This is simply syntactic sugar for diag_indices. >>> np.diag_indices(a.shape[0]) (array([0, 1, 2, 3]), array([0, 1, 2, 3]))", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py", + "ast_data": "FunctionDef name:diag_indices_from arg:arr arguments arg If Compare Raise Call If Call Compare Call Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "sharded_save", + "source_code": "@tf_export('experimental.dtensor.sharded_save', v1=[])\ndef sharded_save(mesh: layout_lib.Mesh, file_prefix: Union[str, tensor_lib.Tensor], tensor_names: Union[List[str], tensor_lib.Tensor], shape_and_slices: Union[List[str], tensor_lib.Tensor], tensors: List[Union[tensor_lib.Tensor, tf_variables.Variable]]):\n with ops.device(api.device_name()):\n io_ops.save_v2(file_prefix, tensor_names, shape_and_slices, tensors)\n mesh_util.barrier(mesh.host_mesh(), 'SaveV2')\n with api.default_mesh(mesh.host_mesh()):\n merge_op = io_ops.MergeV2Checkpoints(checkpoint_prefixes=[file_prefix], destination_prefix=file_prefix, delete_old_dirs=True)\n mesh_util.barrier(mesh.host_mesh(), 'MergeV2Checkpoints')\n return merge_op", + "docstring": "Saves given named tensor slices in a sharded, multi-client safe fashion. The method makes sure the checkpoint directory state is correct in a sharded mutli-client saving. Namely, we place a barrier after SaveV2 to make sure every client has done writing the files. And another one after MergeV2Checkpoints to make sure all Metadata is properly merged. Upon existing, the checkpoint is completed and the all directory operations are done. Args: mesh: The Mesh that contains the Tensors to save. file_prefix: The prefix of checkpoint. tensor_names: a list of tensor names used in save op. shape_and_slices: a list of shape and slice specification used in save op. The only supported value is \"\" as we don't support distributed saving with slices yet. tensors: a list of tensors used in save op. The order should match tensor_names. Returns: A MergeV2Checkpoints op that merged all Metadata.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\save_restore.py", + "ast_data": "FunctionDef name:sharded_save arg:mesh arg:file_prefix arg:tensor_names arg:shape_and_slices arg:tensors arguments arg arg arg arg arg With Call Call Call Call Call With Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_MinimumGrad", + "source_code": "@ops.RegisterGradient('Minimum')\ndef _MinimumGrad(op: ops.Operation, grad):\n return _MaximumMinimumGrad(op, grad, math_ops.less_equal)", + "docstring": "Returns grad*(x y) with type of grad.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_MinimumGrad arg:op arg:grad arguments arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "factorize_from_iterables", + "source_code": "def factorize_from_iterables(iterables) -> tuple[list[np.ndarray], list[Index]]:\n if len(iterables) == 0:\n return ([], [])\n codes, categories = zip(*(factorize_from_iterable(it) for it in iterables))\n return (list(codes), list(categories))", + "docstring": "A higher-level wrapper over . 
Parameters ---------- iterables : list-like of list-likes Returns ------- codes : list of ndarrays categories : list of Indexes Notes ----- See for more info.", + "type": "function", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:factorize_from_iterables arg:iterables arguments arg If Compare Call Return return:no Assign Call Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "ymin", + "source_code": "@property\ndef ymin(self):\n return np.min(self.get_points()[:, 1])", + "docstring": "The bottom edge of the bounding box.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:ymin arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_While", + "source_code": "class _While(_FunctionCaller):\n\n def __init__(self, node, function, enclosing_graph):\n super(_While, self).__init__(node, function, enclosing_graph, first_function_input=0, type_attribute='T', function_attributes=['body', 'cond'])\n\n def convert_variable_to_constant(self, incoming_edge, tensor_data):\n super(_While, self).convert_variable_to_constant(incoming_edge, tensor_data)\n node = self.converted_self()\n if node.node.attr['output_shapes'].list.shape:\n node.node.attr['output_shapes'].list.shape[incoming_edge.destination.index].CopyFrom(tensor_shape_pb2.TensorShapeProto(dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=dim) for dim in tensor_data.numpy.shape]))\n body_name = self._node.attr['body'].func.name\n body = self._enclosing_graph.functions[body_name].converted_self().function\n body.signature.output_arg[incoming_edge.destination.index].type = tensor_data.dtype", + "docstring": "Specialization of _Node to While-like operations.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "ClassDef name:_While FunctionDef name:__init__ arg:self arg:node arg:function arg:enclosing_graph arguments arg arg arg arg Call Call FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Call Call Assign Call If Call Call Call Assign Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, input_bytes):\n self.fdp = atheris.FuzzedDataProvider(input_bytes)", + "docstring": "FuzzingHelper initializer. 
Args: input_bytes: Input randomized bytes used to create a FuzzedDataProvider.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:input_bytes arguments arg arg Assign Call" + }, + { + "library": "pandas", + "name": "size", + "source_code": "@final\ndef size(self) -> Series:\n ids = self.ids\n ngroups = self.ngroups\n out: np.ndarray | list\n if ngroups:\n out = np.bincount(ids[ids != -1], minlength=ngroups)\n else:\n out = []\n return Series(out, index=self.result_index, dtype='int64', copy=False)", + "docstring": "Compute group sizes.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\ops.py", + "ast_data": "FunctionDef name:size arg:self arguments arg Assign Assign If Assign Call Compare Assign Return return:yes Call" + }, + { + "library": "authlib", + "name": "scope_to_list", + "source_code": "def scope_to_list(scope):\n if isinstance(scope, (tuple, list, set)):\n return [to_unicode(s) for s in scope]\n elif scope is None:\n return None\n return scope.strip().split()", + "docstring": "Convert a space separated string to a list of scopes.", + "type": "function", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\util.py", + "ast_data": "FunctionDef name:scope_to_list arg:scope arguments arg If Call Return return:yes Call If Compare Return return:no Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "with_attributes", + "source_code": "@staticmethod\ndef with_attributes(name, checkpointable_objects=None, functions=None, copy_from=None):\n checkpointable_objects = checkpointable_objects or []\n functions = functions or []\n if copy_from is not None:\n for cls in copy_from:\n checkpointable_objects.extend(cls.all_checkpointable_objects)\n functions.extend(cls.all_functions)\n classdict = {'all_checkpointable_objects': set(checkpointable_objects), 'all_functions': set(functions)}\n return type(name, (SerializedAttributes,), classdict)", + "docstring": "Creates a subclass with all attributes as specified in the arguments. Args: name: Name of subclass checkpointable_objects: List of checkpointable objects to be serialized in the SavedModel. functions: List of functions to be serialized in the SavedModel. copy_from: List of other SerializedAttributes subclasses. The returned class will copy checkpoint objects/functions from each subclass. Returns: Child class with attributes as defined in the and lists.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py", + "ast_data": "FunctionDef name:with_attributes arg:name arg:checkpointable_objects arg:functions arg:copy_from arguments arg arg arg arg Assign BoolOp Assign BoolOp If Compare For Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "reset_peak_memory_stats", + "source_code": "def reset_peak_memory_stats(device: Optional[_device_t]=None) -> None:\n if not is_initialized():\n return\n torch._C._mtia_resetPeakMemoryStats(_get_device_index(device, optional=True))", + "docstring": "Reset the peak memory stats for a given device. Args: device (torch.device, str, or int, optional) selected device. 
Returns statistics for the current device, given by current_device(), if device is None (default).", + "type": "function", + "file_path": "pytorch\\torch\\mtia\\memory.py", + "ast_data": "FunctionDef name:reset_peak_memory_stats arg:device arguments arg If Call Return return:no Call Call" + }, + { + "library": "pytorch", + "name": "__iter__", + "source_code": "def __iter__(self):\n return iter(self._node)", + "docstring": "Iterate over the nodes.", + "type": "method", + "file_path": "pytorch\\torch\\package\\_digraph.py", + "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "disable_graph_collection", + "source_code": "def disable_graph_collection():\n context().disable_graph_collection()", + "docstring": "Disables graph collection of executed functions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:disable_graph_collection arguments Call Call" + }, + { + "library": "pytorch", + "name": "ones", + "source_code": "def ones(sharding_spec: ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor:\n return full(sharding_spec, size, fill_value=1, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs)", + "docstring": "Returns a :class: with the scalar value 1. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:): The specification describing how to shard the Tensor. size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. Keyword args: dtype (:class:, optional): the desired data type of returned tensor. Default: if `torch.set_default_dtypetorch.layouttorch.distributed.rpc.RRefShardedTensor` object on each rank", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py", + "ast_data": "FunctionDef name:ones arg:sharding_spec arguments arg arg arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "deconv_output_length", + "source_code": "def deconv_output_length(input_length, filter_size, padding, stride):\n if input_length is None:\n return None\n input_length *= stride\n if padding == 'valid':\n input_length += max(filter_size - stride, 0)\n elif padding == 'full':\n input_length -= stride + filter_size - 2\n return input_length", + "docstring": "Determines output length of a transposed convolution given input length. Args: input_length: integer. filter_size: integer. padding: one of \"same\", \"valid\", \"full\". stride: integer. 
Returns: The output length (integer).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\layers\\utils.py", + "ast_data": "FunctionDef name:deconv_output_length arg:input_length arg:filter_size arg:padding arg:stride arguments arg arg arg arg If Compare Return return:no If Compare Call If Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "check_dependency", + "source_code": "def check_dependency(partition):\n visited: set[Partition] = {partition}\n queue: deque[Partition] = deque([partition])\n while queue:\n p = queue.popleft()\n for child in p.children:\n if child == partition:\n return True\n elif child not in visited:\n visited.add(child)\n queue.append(child)\n return False", + "docstring": "Given a partition,check if there is a circular dependency on this partition using bfs", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", + "ast_data": "FunctionDef name:check_dependency arg:partition arguments arg Call While Assign Call For If Compare Return return:yes If Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "celu", + "source_code": "def celu(input: Tensor, scale: float, zero_point: int, alpha: float=1.0) -> Tensor:\n if not input.is_quantized:\n raise ValueError(\"Input to 'quantized.celu' must be quantized!\")\n return torch.ops.quantized.celu(input, scale, zero_point, alpha)", + "docstring": "celu(input, scale, zero_point, alpha=1.) -> Tensor Applies the quantized CELU function element-wise. .. math:: \\text{CELU}(x) = \\max(0,x) + \\min(0, \\alpha * (\\exp(x / \\alpha) - 1)) Args: input: quantized input alpha: the :math: value for the CELU formulation. Default: 1.0", + "type": "function", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", + "ast_data": "FunctionDef name:celu arg:input arg:scale arg:zero_point arg:alpha arguments arg arg arg arg If Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_scope", + "source_code": "def _get_scope(node_name):\n if not node_name:\n raise ValueError(f'Node name cannot be empty or None. Received: {node_name}.')\n if node_name.startswith('^'):\n node_name = node_name[1:]\n if '/' in node_name:\n scope, _ = node_name.rsplit('/', 1)\n return scope\n return ''", + "docstring": "Extract the scope name from a node name. The scope name is everything before the final slash, not including any ^ prefix denoting a control dependency. Args: node_name: the full name of an Op or a Tensor in the graph. Returns: The deepest named scope containing the node. 
Raises: ValueError: if tensor_name is None or empty", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py", + "ast_data": "FunctionDef name:_get_scope arg:node_name arguments arg If Raise Call If Call Assign If Compare Assign Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "conjugate", + "source_code": "def conjugate(self) -> 'SymFloat':\n return self", + "docstring": "Returns the complex conjugate of the float.", + "type": "method", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:conjugate arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "partial_tile", + "source_code": "def partial_tile(tensor, tile_assignment, use_sharding_op=False, unspecified_dims=None):\n return Sharding.partial_tile(tile_assignment).apply_to_tensor(tensor, use_sharding_op=use_sharding_op, unspecified_dims=unspecified_dims or [])", + "docstring": "Returns a tensor that has tiled sharding. Args: tensor: A tf.Tensor to shard. tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. It must have one more dimension than tensor, and the last dimension represents partially replicated tiles. use_sharding_op: If true, adds a sharding op to set the sharding. unspecified_dims: An optional list of dimensions unspecified.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py", + "ast_data": "FunctionDef name:partial_tile arg:tensor arg:tile_assignment arg:use_sharding_op arg:unspecified_dims arguments arg arg arg arg Return return:yes Call Call BoolOp" + }, + { + "library": "tensorflow", + "name": "wait_for_other_workers", + "source_code": "def wait_for_other_workers():\n return dc_context.get_current_worker_context().wait_for_other_workers()", + "docstring": "Waits for other workers to reach the same call to this method.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py", + "ast_data": "FunctionDef name:wait_for_other_workers arguments Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "getargs", + "source_code": "def getargs(co):\n if not iscode(co):\n raise TypeError('arg is not a code object')\n nargs = co.co_argcount\n names = co.co_varnames\n args = list(names[:nargs])\n for i in range(nargs):\n if args[i][:1] in ['', '.']:\n raise TypeError('tuple function arguments are not supported')\n varargs = None\n if co.co_flags & CO_VARARGS:\n varargs = co.co_varnames[nargs]\n nargs = nargs + 1\n varkw = None\n if co.co_flags & CO_VARKEYWORDS:\n varkw = co.co_varnames[nargs]\n return (args, varargs, varkw)", + "docstring": "Get information about the arguments accepted by a code object. 
Three things are returned: (args, varargs, varkw), where 'args' is a list of argument names (possibly containing nested lists), and 'varargs' and 'varkw' are the names of the * and ** arguments or None.", + "type": "function", + "file_path": "numpy\\numpy\\_utils\\_inspect.py", + "ast_data": "FunctionDef name:getargs arg:co arguments arg If Call Raise Call Assign Assign Assign Call For Call If Compare Raise Call Assign If Assign Assign Assign If Assign Return return:yes" + }, + { + "library": "pandas", + "name": "conda_package_to_pip", + "source_code": "def conda_package_to_pip(package: str):\n package = re.sub('(?<=[^<>~])=', '==', package).strip()\n for compare in ('<=', '>=', '=='):\n if compare in package:\n pkg, version = package.split(compare)\n if pkg in EXCLUDE:\n return\n if pkg in REMAP_VERSION:\n return ''.join((pkg, compare, REMAP_VERSION[pkg]))\n if pkg in CONDA_TO_PIP:\n return ''.join((CONDA_TO_PIP[pkg], compare, version))\n if package in EXCLUDE:\n return\n if package in CONDA_TO_PIP:\n return CONDA_TO_PIP[package]\n return package", + "docstring": "Convert a conda package to its pip equivalent. In most cases they are the same, those are the exceptions: - Packages that should be excluded (in ) - Packages that should be renamed (in ) - A package requiring a specific version, in conda is defined with a single equal (e.g. ``)", + "type": "function", + "file_path": "pandas\\scripts\\generate_pip_deps_from_conda.py", + "ast_data": "FunctionDef name:conda_package_to_pip arg:package arguments arg Assign Call Call For If Compare Assign Call If Compare Return return:no If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:no If Compare Return return:yes Return return:yes" + }, + { + "library": "cherrypy", + "name": "__call__", + "source_code": "@staticmethod\ndef __call__(**kwargs):\n\n def tool_decorator(f):\n _Vars(f).setdefault('_cp_config', {}).update(kwargs)\n return f\n return tool_decorator", + "docstring": "Decorate for page handlers to set _cp_config.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpconfig.py", + "ast_data": "FunctionDef name:__call__ arguments arg FunctionDef name:tool_decorator arg:f arguments arg Call Call Call Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "FormatError", + "source_code": "class FormatError(OSError):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg", + "docstring": "Exception thrown when there is a problem parsing a configuration file.", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\npy_pkg_config.py", + "ast_data": "ClassDef name:FormatError FunctionDef name:__init__ arg:self arg:msg arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_generate_info_dict", + "source_code": "def _generate_info_dict(self, model: GraphModule) -> dict[str, dict]:\n info_dict: dict[str, dict] = {}\n for fqn, module in model.named_modules():\n if self._supports_report_gen(module):\n pre_obs: ModelReportObserver = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)\n num_batches: torch.Tensor = pre_obs.percentile_batches_tracked\n average_ratios: torch.Tensor = pre_obs.average_percentile_ratio\n channel_batch_cnts: torch.Tensor = pre_obs.constant_channels\n total_batches: int = pre_obs.num_batches_tracked\n max_vals: torch.Tensor = pre_obs.max_val\n for index, ratio_val in enumerate(average_ratios):\n if ratio_val.item() < 0:\n average_ratios[index] 
= -ratio_val\n if ratio_val.item() < 1:\n average_ratios[index] = 1 / ratio_val\n outlier_calcs = self._calculate_outlier_info(average_ratios, num_batches, total_batches)\n info_dict[fqn] = {self.CHANNEL_AXIS_KEY: self.ch_axis, self.REF_PERCENTILE_KEY: self.reference_percentile, self.RATIO_THRES_KEY: self.ratio_threshold, self.COMP_METRIC_KEY: average_ratios, self.NUM_BATCHES_KEY: num_batches, self.OUTLIER_KEY: outlier_calcs[self.OUTLIER_KEY], self.IS_SUFFICIENT_BATCHES_KEY: outlier_calcs[self.IS_SUFFICIENT_BATCHES_KEY], self.CONSTANT_COUNTS_KEY: channel_batch_cnts, self.MAX_VALS_KEY: max_vals}\n return info_dict", + "docstring": "Helper function for generate_detector_report that does the generation of the dictionary. This process is done as specified in generate_detector_report documentation Args: model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers Returns a dict mapping relevant module fqns to: whether there were outliers found in activation before the number of batches used for each channel whether fraction of applicable batches used is above fraction_batches_used_threshold their p_r metric compared to the threshold the threshold used to make the recommendation the reference_percentile used to make the recommendation the channel axis used to determine individual channels the constant batch counts per channel the per channel max values", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:_generate_info_dict arg:self arg:model arguments arg arg For Call If Call Call For Call If Compare Call Assign If Compare Call Assign Assign Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "def predict_proba(self, X):\n check_is_fitted(self)\n ovr = self.multi_class in ['ovr', 'warn'] or (self.multi_class in ['auto', 'deprecated'] and (self.classes_.size <= 2 or self.solver == 'liblinear'))\n if ovr:\n return super()._predict_proba_lr(X)\n else:\n decision = self.decision_function(X)\n if decision.ndim == 1:\n decision_2d = np.c_[-decision, decision]\n else:\n decision_2d = decision\n return softmax(decision_2d, copy=False)", + "docstring": "Probability estimates. The returned estimates for all classes are ordered by the label of classes. For a multi_class problem, if multi_class is set to be \"multinomial\" the softmax function is used to find the predicted probability of each class. Else use a one-vs-rest approach, i.e. calculate the probability of each class assuming it to be positive using the logistic function and normalize these values across all the classes. Parameters ---------- X : array-like of shape (n_samples, n_features) Vector to be scored, where is the number of samples and is the number of features. 
Returns ------- T : array-like of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in ``.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_logistic.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign BoolOp Compare BoolOp Compare BoolOp Compare Compare If Return return:yes Call Call Assign Call If Compare Assign Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "annotate", + "source_code": "def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:\n for module_name, quantization_config in self.module_name_qconfig.items():\n self._annotate_with_config(model, quantization_config, _create_module_name_filter(module_name))\n for operator_type, quantization_config in self.operator_type_qconfig.items():\n self._annotate_with_config(model, quantization_config, _create_operator_type_filter(operator_type))\n if self.global_config:\n self._annotate_with_config(model, self.global_config, _global_config_filter)\n self._annotate_output_for_int8_in_int8_out_pattern_entry(model)\n return model", + "docstring": "Annotate the given model with quantization configurations. Annotation contracts: 1. Annotate each node according to the user's qconfig in the following order: , , and . 2. Avoid re-annotating nodes already annotated in prior stages. For example, if has been annotated by , it won't be annotated again during the processing of the 'operator_type_qconfig' or 'global_config'. 3. For config is , the node will be annotated with . For each pair of (module_name_or_operator_type_or_global, qconfig), a filter function is created. This filter function checks if the node is marked by current stage and not annotated by the previous stage.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py", + "ast_data": "FunctionDef name:annotate arg:self arg:model arguments arg arg For Call Call Call For Call Call Call If Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_center", + "source_code": "def get_center(self):\n return self.get_patch_transform().transform((0.5, 0.5))", + "docstring": "Return the centre of the rectangle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_center arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "init", + "source_code": "def init(self):\n if context.executing_eagerly() and self._closed:\n raise RuntimeError(f'SummaryWriter {self!r} is already closed')\n return self._init_op", + "docstring": "See .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:init arg:self arguments arg If BoolOp Call Raise Call Return return:yes" + }, + { + "library": "django", + "name": "difference", + "source_code": "def difference(self, other):\n return self._topology(capi.geos_difference(self.ptr, other.ptr))", + "docstring": "Return a Geometry representing the points making up this Geometry that do not make up other.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:difference arg:self arg:other arguments arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "entropy", + "source_code": "def entropy(self, mean=None, cov=1):\n dim, mean, cov_object = 
self._process_parameters(mean, cov)\n return 0.5 * (cov_object.rank * (_LOG_2PI + 1) + cov_object.log_pdet)", + "docstring": "Compute the differential entropy of the multivariate normal. Parameters ---------- %(_mvn_doc_default_callparams)s Returns ------- h : scalar Entropy of the multivariate normal distribution Notes ----- %(_mvn_doc_callparams_note)s", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:entropy arg:self arg:mean arg:cov arguments arg arg arg Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_kern_dist_from_name", + "source_code": "def get_kern_dist_from_name(self, name1, name2):\n return self._kern.get((name1, name2), 0)", + "docstring": "Return the kerning pair distance (possibly 0) for chars *name1* and *name2*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", + "ast_data": "FunctionDef name:get_kern_dist_from_name arg:self arg:name1 arg:name2 arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_contains", + "source_code": "def _contains(self, event):\n return self._selection_artist.contains(event, radius=0)[0]", + "docstring": "Return True if event is within the patch.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:_contains arg:self arg:event arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "UnknownVariable", + "source_code": "class UnknownVariable(VariableTracker):\n pass", + "docstring": "It could be anything!", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py", + "ast_data": "ClassDef name:UnknownVariable" + }, + { + "library": "tensorflow", + "name": "_py_if_stmt", + "source_code": "def _py_if_stmt(cond, body, orelse):\n return body() if cond else orelse()", + "docstring": "Overload of if_stmt that executes a Python if statement.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:_py_if_stmt arg:cond arg:body arg:orelse arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "geos", + "source_code": "@property\ndef geos(self):\n if self.geos_support:\n from django.contrib.gis.geos import GEOSGeometry\n return GEOSGeometry(self._geos_ptr(), self.srid)\n else:\n from django.contrib.gis.geos import GEOSException\n raise GEOSException(f'GEOS does not support {self.__class__.__qualname__}.')", + "docstring": "Return a GEOSGeometry object from this OGRGeometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:geos arg:self arguments arg If Return return:yes Call Call Raise Call" + }, + { + "library": "scrapy", + "name": "item_completed", + "source_code": "def item_completed(self, results: list[FileInfoOrError], item: Any, info: SpiderInfo) -> Any:\n if self.LOG_FAILED_RESULTS:\n for ok, value in results:\n if not ok:\n assert isinstance(value, Failure)\n logger.error('%(class)s found errors processing %(item)s', {'class': self.__class__.__name__, 'item': item}, exc_info=failure_to_exc_info(value), extra={'spider': info.spider})\n return item", + "docstring": "Called per item when all media requests has been processed", + "type": "method", + "file_path": "scrapy\\scrapy\\pipelines\\media.py", + "ast_data": "FunctionDef name:item_completed arg:self arg:results arg:item arg:info arguments 
arg arg arg arg If For If Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "_tocomplex", + "source_code": "def _tocomplex(arr):\n if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, nt.ushort, nt.csingle)):\n return arr.astype(nt.csingle)\n else:\n return arr.astype(nt.cdouble)", + "docstring": "Convert its input to a complex array. The input is returned as a complex array of the smallest type that will fit the original data: types like single, byte, short, etc. become csingle, while others become cdouble. A copy of the input is always made. Parameters ---------- arr : array Returns ------- array An array with the same input data as the input but in complex form. Examples -------- >>> import numpy as np First, consider an input of type short: >>> a = np.array([1,2,3],np.short) >>> ac = np.lib.scimath._tocomplex(a); ac array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) >>> ac.dtype dtype('complex64') If the input is of type double, the output is correspondingly of the complex double type as well: >>> b = np.array([1,2,3],np.double) >>> bc = np.lib.scimath._tocomplex(b); bc array([1.+0.j, 2.+0.j, 3.+0.j]) >>> bc.dtype dtype('complex128') Note that even if the input was complex to begin with, a copy is still made, since the astype() method always copies: >>> c = np.array([1,2,3],np.csingle) >>> cc = np.lib.scimath._tocomplex(c); cc array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) >>> c *= 2; c array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) >>> cc array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_scimath_impl.py", + "ast_data": "FunctionDef name:_tocomplex arg:arr arguments arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "ihfft", + "source_code": "@_dispatch\ndef ihfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, plan=None):\n return (Dispatchable(x, np.ndarray),)", + "docstring": "Compute the inverse FFT of a signal that has Hermitian symmetry. Parameters ---------- x : array_like Input array. n : int, optional Length of the inverse FFT, the number of points along transformation axis in the input to use. If is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros. If is not given, the length of the input along the axis specified by is used. axis : int, optional Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {\"backward\", \"ortho\", \"forward\"}, optional Normalization mode (see ). Default is \"backward\". overwrite_x : bool, optional If True, the contents of can be destroyed; the default is False. See for more details. workers : int, optional Maximum number of workers to use for parallel computation. If negative, the value wraps around from `~scipy.fft.fftaxisaxishfftihfftrfftirffthfft`, within roundoff error. 
Examples -------- >>> from scipy.fft import ifft, ihfft >>> import numpy as np >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) >>> ifft(spectrum) array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary >>> ihfft(spectrum) array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_basic.py", + "ast_data": "FunctionDef name:ihfft arg:x arg:n arg:axis arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "split_by_sparsity", + "source_code": "def split_by_sparsity(values):\n dense_values = []\n dense_indices = []\n sparse_values = []\n sparse_indices = []\n for i, v in enumerate(values):\n if is_indexed_slices(v):\n sparse_values.append(v)\n sparse_indices.append(i)\n else:\n dense_values.append(v)\n dense_indices.append(i)\n return (dense_values, dense_indices, sparse_values, sparse_indices)", + "docstring": "Split values into dense and sparse values. Args: values: a list of tensors or s. Returns: Four lists: a list of dense values, a list of their indices in and a list of sparse values, a list of their indices in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py", + "ast_data": "FunctionDef name:split_by_sparsity arg:values arguments arg Assign Assign Assign Assign For Call If Call Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "ResultType", + "source_code": "class ResultType:\n _vals: dict[ComboType, Status]\n\n def __repr__(self) -> str:\n return f'ResultType[{self._vals}]'\n\n def __init__(self) -> None:\n self._vals = {}\n\n def __len__(self) -> int:\n return len(self._vals)\n\n def num_ran(self) -> int:\n ret = len(self._vals)\n for status in self._vals.values():\n if status == Status.SKIPPED:\n ret -= 1\n return ret\n\n def set(self, combo: ComboType, status: Status) -> None:\n combo = tuple(sorted(combo))\n self._vals[combo] = status\n\n def lookup(self, combo: ComboType) -> Optional[Status]:\n combo = tuple(sorted(combo))\n return self._vals.get(combo, None)\n\n def keys(self) -> KeysView[ComboType]:\n return self._vals.keys()", + "docstring": "The mapping of the combo strings to the result status after running the config fuzzer.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\fuzzer.py", + "ast_data": "ClassDef name:ResultType FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:num_ran arg:self arguments arg Assign Call For Call If Compare Return return:yes FunctionDef name:set arg:self arg:combo arg:status arguments arg arg arg Assign Call Call Assign FunctionDef name:lookup arg:self arg:combo arguments arg arg Assign Call Call Return return:yes Call FunctionDef name:keys arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__iter__", + "source_code": "def __iter__(self):\n return iter(self._vars)", + "docstring": "Return an iterable for accessing the underlying sharded variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py", + "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_node_dependencies", + "source_code": "def _get_node_dependencies(self, proto):\n dependencies = 
{ref.local_name: ref.node_id for ref in proto.dependencies}\n kind = proto.WhichOneof('kind')\n if kind == 'function':\n concrete_functions = proto.function.concrete_functions\n for fn_name in concrete_functions:\n for bound_input in self._proto.concrete_functions[fn_name].bound_inputs:\n dependencies[bound_input] = bound_input\n elif kind == 'bare_concrete_function':\n fn_name = proto.bare_concrete_function.concrete_function_name\n for bound_input in self._proto.concrete_functions[fn_name].bound_inputs:\n dependencies[bound_input] = bound_input\n elif kind == 'resource':\n for child in proto.children:\n if child.local_name == '_create_resource':\n dependencies['_create_resource'] = child.node_id\n return dependencies", + "docstring": "Returns a dictionary of all dependencies of an object. Args: proto: A SavedObject proto. Returns: Dict mapping string dependency name *or* int node id to the node id. The int node id key is used for mapping function captures.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py", + "ast_data": "FunctionDef name:_get_node_dependencies arg:self arg:proto arguments arg arg Assign Assign Call If Compare Assign For For Assign If Compare Assign For Assign If Compare For If Compare Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "connect", + "source_code": "def connect(self, event: str, callback: Callable[..., Any], priority: int=500) -> int:\n listener_id = self.events.connect(event, callback, priority)\n logger.debug('[app] connecting event %r (%d): %r [id=%s]', event, priority, callback, listener_id)\n return listener_id", + "docstring": "Register *callback* to be called when *event* is emitted. For details on available core events and the arguments of callback functions, please see :ref:. :param event: The name of target event :param callback: Callback function for the event :param priority: The priority of the callback. The callbacks will be invoked in order of *priority* (ascending). :return: A listener ID. It can be used for :meth:. .. versionchanged:: 3.0 Support *priority*", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:connect arg:self arg:event arg:callback arg:priority arguments arg arg arg arg Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "GroupFusion", + "source_code": "class GroupFusion(GroupBatchFusionBase):\n pass", + "docstring": "Fuse ops in a group way, e.g, fuse mm/addmm of arbitrary input shapes with fbgemm.gmm.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\group_batch_fusion.py", + "ast_data": "ClassDef name:GroupFusion" + }, + { + "library": "tensorflow", + "name": "get_dense_tensor", + "source_code": "def get_dense_tensor(self, transformation_cache, state_manager):\n if isinstance(self.categorical_column, SequenceCategoricalColumn):\n raise ValueError('In embedding_column: {}. categorical_column must not be of type SequenceCategoricalColumn. Suggested fix A: If you wish to use DenseFeatures, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use SequenceFeatures instead of DenseFeatures. 
Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n return self._get_dense_tensor_internal(transformation_cache, state_manager)", + "docstring": "Returns the embedding lookup result.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg If Call Raise Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "softmax", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef softmax(x, axis=-1):\n return nn.softmax(x, axis=axis)", + "docstring": "Softmax of a tensor. Args: x: A tensor or variable. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. Returns: A tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:softmax arg:x arg:axis arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "used_dims_from_index", + "source_code": "def used_dims_from_index(self, index: sympy.Expr):\n used_dims = OrderedSet[sympy.Symbol]()\n for sym in index.free_symbols:\n assert isinstance(sym, sympy.Symbol)\n if symbol_is_type(sym, SymT.TMP):\n cse_var = self.lookup_cse_var(sym.name)\n assert isinstance(cse_var, HalideCSEVariable) and cse_var.used_dims is not None\n used_dims.update(cse_var.used_dims)\n elif symbol_is_type(sym, SymT.HALIDE):\n used_dims.add(sym)\n elif symbol_is_type(sym, (SymT.UNBACKED_INT, SymT.SIZE, SymT.PRECOMPUTED_SIZE, SymT.INDEX)):\n pass\n else:\n raise NotImplementedError(f'unhandled symbol {sym}')\n return self.sort_used_dims(used_dims)", + "docstring": "Detect which range trees are used to populate HalideCSEVariable.used_dims", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py", + "ast_data": "FunctionDef name:used_dims_from_index arg:self arg:index arguments arg arg Assign Call For Call If Call Assign Call BoolOp Call Compare Call If Call Call If Call Raise Call Return return:yes Call" + }, + { + "library": "django", + "name": "open", + "source_code": "def open(self, name, mode='rb'):\n return self._open(name, mode)", + "docstring": "Retrieve the specified file from storage.", + "type": "method", + "file_path": "django\\django\\core\\files\\storage\\base.py", + "ast_data": "FunctionDef name:open arg:self arg:name arg:mode arguments arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "IntegerArray", + "source_code": "class IntegerArray(NumericArray):\n _dtype_cls = IntegerDtype", + "docstring": "Array of integer (optional missing) values. Uses :attr: as the missing value. .. warning:: IntegerArray is currently experimental, and its API or internal implementation may change without warning. We represent an IntegerArray with 2 numpy arrays: - data: contains a numpy integer array of the appropriate dtype - mask: a boolean array holding a mask on the data, True is missing To construct an IntegerArray from generic array-like input, use :func: with one of the integer dtypes (see examples). See :ref: for more. Parameters ---------- values : numpy.ndarray A 1-d integer-dtype array. mask : numpy.ndarray A 1-d boolean-dtype array indicating missing values. copy : bool, default False Whether to copy the and . 
Attributes ---------- None Methods ------- None Returns ------- IntegerArray See Also -------- array : Create an array using the appropriate dtype, including `pandas.array`. >>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype()) >>> int_array [1, , 3] Length: 3, dtype: Int32 String aliases for the dtypes are also available. They are capitalized. >>> pd.array([1, None, 3], dtype=\"Int32\") [1, , 3] Length: 3, dtype: Int32 >>> pd.array([1, None, 3], dtype=\"UInt16\") [1, , 3] Length: 3, dtype: UInt16", + "type": "class", + "file_path": "pandas\\pandas\\core\\arrays\\integer.py", + "ast_data": "ClassDef name:IntegerArray Assign" + }, + { + "library": "matplotlib", + "name": "thetagrids", + "source_code": "def thetagrids(angles: ArrayLike | None=None, labels: Sequence[str | Text] | None=None, fmt: str | None=None, **kwargs) -> tuple[list[Line2D], list[Text]]:\n ax = gca()\n if not isinstance(ax, PolarAxes):\n raise RuntimeError('thetagrids only defined for polar Axes')\n if all((param is None for param in [angles, labels, fmt])) and (not kwargs):\n lines_out: list[Line2D] = ax.xaxis.get_ticklines()\n labels_out: list[Text] = ax.xaxis.get_ticklabels()\n elif angles is None:\n raise TypeError(\"'angles' cannot be None when other parameters are passed\")\n else:\n lines_out, labels_out = ax.set_thetagrids(angles, labels=labels, fmt=fmt, **kwargs)\n return (lines_out, labels_out)", + "docstring": "Get or set the theta gridlines on the current polar plot. Call signatures:: lines, labels = thetagrids() lines, labels = thetagrids(angles, labels=None, fmt=None, **kwargs) When called with no arguments, simply returns the tuple (*lines*, *labels*). When called with arguments, the labels will appear at the specified angles. Parameters ---------- angles : tuple with floats, degrees The angles of the theta gridlines. labels : tuple with strings or None The labels to use at each radial gridline. The will be used if None. fmt : str or None Format string used in . For example '%f'. Note that the angle in radians will be used. Returns ------- lines : list of The theta gridlines. labels : list of The tick labels. Other Parameters ---------------- **kwargs *kwargs* are optional properties for the labels. See Also -------- .pyplot.rgrids .projections.polar.PolarAxes.set_thetagrids .Axis.get_gridlines .Axis.get_ticklabels Examples -------- :: # set the locations of the angular gridlines lines, labels = thetagrids(range(45, 360, 90)) # set the locations and labels of the angular gridlines lines, labels = thetagrids(range(45, 360, 90), ('NE', 'NW', 'SW', 'SE'))", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:thetagrids arg:angles arg:labels arg:fmt arguments arg arg arg arg Assign Call If Call Raise Call If BoolOp Call Compare Call Call If Compare Raise Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "ConvBn3d", + "source_code": "class ConvBn3d(_FusedModule):\n\n def __init__(self, conv, bn):\n assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'\n super().__init__(conv, bn)", + "docstring": "This is a sequential container which calls the Conv 3d and Batch Norm 3d modules. 
During quantization this will be replaced with the corresponding fused module.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py", + "ast_data": "ClassDef name:ConvBn3d FunctionDef name:__init__ arg:self arg:conv arg:bn arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "num_records_produced", + "source_code": "def num_records_produced(self, name=None):\n if self._reader_ref.dtype == dtypes.resource:\n return gen_io_ops.reader_num_records_produced_v2(self._reader_ref, name=name)\n else:\n return gen_io_ops.reader_num_records_produced(self._reader_ref, name=name)", + "docstring": "Returns the number of records this reader has produced. This is the same as the number of Read executions that have succeeded. Args: name: A name for the operation (optional). Returns: An int64 Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py", + "ast_data": "FunctionDef name:num_records_produced arg:self arg:name arguments arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "has_default_writer", + "source_code": "def has_default_writer():\n return _summary_state.writer is not None", + "docstring": "Returns a boolean indicating whether a default summary writer exists.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:has_default_writer arguments Return return:yes Compare" + }, + { + "library": "scikit-learn", + "name": "ensure_common_namespace_device", + "source_code": "def ensure_common_namespace_device(reference, *arrays):\n xp, is_array_api = get_namespace(reference)\n if is_array_api:\n device_ = device(reference)\n return [xp.asarray(a, device=device_) for a in arrays]\n else:\n return arrays", + "docstring": "Ensure that all arrays use the same namespace and device as reference. If necessary the arrays are moved to the same namespace and device as the reference array. Parameters ---------- reference : array Reference array. *arrays : array Arrays to check. 
Returns ------- arrays : list Arrays with the same namespace and device as reference.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", + "ast_data": "FunctionDef name:ensure_common_namespace_device arg:reference arguments arg arg Assign Call If Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "start_loop", + "source_code": "def start_loop(self):\n pass", + "docstring": "Called when the thread starts.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py", + "ast_data": "FunctionDef name:start_loop arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "_maybe_assert_valid_sample", + "source_code": "def _maybe_assert_valid_sample(self, counts):\n if not self.validate_args:\n return counts\n counts = distribution_util.embed_check_nonnegative_integer_form(counts)\n return control_flow_ops.with_dependencies([check_ops.assert_equal(self.total_count, math_ops.reduce_sum(counts, -1), message='counts must sum to `self.total_count`')], counts)", + "docstring": "Check counts for proper shape, values, then return tensor version.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\multinomial.py", + "ast_data": "FunctionDef name:_maybe_assert_valid_sample arg:self arg:counts arguments arg arg If Return return:yes Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "wrapped", + "source_code": "@functools.wraps(orig_fn)\ndef wrapped(*args, **kwargs):\n proxy = _find_proxy(args, kwargs)\n if proxy is not None:\n return_proxy = proxy.tracer.create_proxy('call_function', orig_fn, args, kwargs)\n return_proxy.node.meta['is_wrapped'] = True\n return return_proxy\n return orig_fn(*args, **kwargs)", + "docstring": "Given an closed-over `` node to preserve the call to this leaf function directly. Otherwise, just return the results of this function call, as this function is not being traced.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", + "ast_data": "FunctionDef name:wrapped arguments arg arg Assign Call If Compare Assign Call Assign Return return:yes Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_get_forward_arg_names", + "source_code": "def _get_forward_arg_names(mod: torch.nn.Module, args: tuple[Any, ...], kwargs: Optional[dict[str, Any]]=None) -> list[str]:\n sig = inspect.signature(mod.forward)\n _args = sig.bind_partial(*args).arguments\n names: list[str] = []\n for name, value in _args.items():\n if sig.parameters[name].kind == inspect._ParameterKind.VAR_POSITIONAL:\n names.extend([f'{name}_{i}' for i, _ in enumerate(value)])\n else:\n names.append(name)\n if kwargs:\n names.extend([kwarg for kwarg, _ in kwargs.items()])\n return names", + "docstring": "Gets the argument names to forward that are used, for restoring the original signature when unlifting the exported program module. - Positional args: retain the original argument names, and enumerate *args as args_0, args_1, ... - Keyword args: retain the original kwarg names in the order specified by the user. 
This order seems to matter for the current state of export lifted modules.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_get_forward_arg_names arg:mod arg:args arg:kwargs arguments arg arg arg Assign Call Assign Call For Call If Compare Call Call Call If Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_get_routed_params_for_fit", + "source_code": "def _get_routed_params_for_fit(self, params):\n if _routing_enabled():\n routed_params = process_routing(self, 'fit', **params)\n else:\n params = params.copy()\n groups = params.pop('groups', None)\n routed_params = Bunch(estimator=Bunch(fit=params), splitter=Bunch(split={'groups': groups}), scorer=Bunch(score={}))\n if params.get('sample_weight') is not None and self._check_scorers_accept_sample_weight():\n routed_params.scorer.score['sample_weight'] = params['sample_weight']\n return routed_params", + "docstring": "Get the parameters to be used for routing. This is a method instead of a snippet in ``.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py", + "ast_data": "FunctionDef name:_get_routed_params_for_fit arg:self arg:params arguments arg arg If Call Assign Call Assign Call Assign Call Assign Call Call Call Call If BoolOp Compare Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "QuantizationConfig", + "source_code": "@tf_export('tpu.experimental.embedding.QuantizationConfig')\nclass QuantizationConfig:\n\n def __init__(self, num_buckets: int, lower: float, upper: float):\n if num_buckets < 2:\n raise ValueError(f'num_buckets is {num_buckets}, must be at least 2 for simulated quantization.')\n self.num_buckets = num_buckets\n self.lower = lower\n self.upper = upper\n\n def _set_optimization_parameters(self, parameters: optimization_parameters_pb2.OptimizationParameters):\n parameters.simulated_quantization.enabled = True\n parameters.simulated_quantization.num_buckets = self.num_buckets\n parameters.simulated_quantization.clipping_limits.lower.value = self.lower\n parameters.simulated_quantization.clipping_limits.upper.value = self.upper\n\n def __repr__(self):\n return 'QuantizationConfig(num_buckets={num_buckets!r}, lower={lower!r}, upper={upper!r})'.format(num_buckets=self.num_buckets, lower=self.lower, upper=self.upper)", + "docstring": "Settings for simulated quantization of the tpu embedding table. When simulated quantization is enabled, the results of the embedding lookup are clipped and quantized according to the settings here before the combiner is applied. For example, to quantize the following is done: See tensorflow/core/protobuf/tpu/optimization_parameters.proto for more details. NOTE: This does not change the storage type of the embedding table, that will continue to be float32 as will the saved variable in the checkpoint. 
You will have to manually quantize the variable (typically with the same algorithm and settings as above) manually.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py", + "ast_data": "ClassDef name:QuantizationConfig FunctionDef name:__init__ arg:self arg:num_buckets arg:lower arg:upper arguments arg arg arg arg If Compare Raise Call Assign Assign Assign FunctionDef name:_set_optimization_parameters arg:self arg:parameters arguments arg arg Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "slerp", + "source_code": "def slerp(self, q1: 'Quaternion', t: float) -> 'Quaternion':\n KORNIA_CHECK_TYPE(q1, Quaternion)\n q0 = self.normalize()\n q1 = q1.normalize()\n return q0 * (q0.inv() * q1) ** t", + "docstring": "Return a unit quaternion spherically interpolated between quaternions self.q and q1. See more: Args: q1: second quaternion to be interpolated between. t: interpolation ratio, range [0-1] Example: >>> q0 = Quaternion.identity() >>> q1 = Quaternion(torch.tensor([1., .5, 0., 0.])) >>> q2 = q0.slerp(q1, .3)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\quaternion.py", + "ast_data": "FunctionDef name:slerp arg:self arg:q1 arg:t arguments arg arg arg Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, patch: torch.Tensor) -> torch.Tensor:\n KORNIA_CHECK_SHAPE(patch, ['B', '1', 'H', 'W'])\n self.weighting = self.weighting.to(patch.dtype).to(patch.device)\n grads: torch.Tensor = self.gradient(patch) * self.weighting\n gx: torch.Tensor = grads[:, :, 0]\n gy: torch.Tensor = grads[:, :, 1]\n ellipse_shape = torch.cat([gx.pow(2).mean(dim=2).mean(dim=2, keepdim=True), (gx * gy).mean(dim=2).mean(dim=2, keepdim=True), gy.pow(2).mean(dim=2).mean(dim=2, keepdim=True)], dim=2)\n bad_mask = ((ellipse_shape < self.eps).float().sum(dim=2, keepdim=True) >= 2).to(ellipse_shape.dtype)\n circular_shape = torch.tensor([1.0, 0.0, 1.0]).to(ellipse_shape.device).to(ellipse_shape.dtype).view(1, 1, 3)\n ellipse_shape = ellipse_shape * (1.0 - bad_mask) + circular_shape * bad_mask\n ellipse_shape = ellipse_shape / ellipse_shape.max(dim=2, keepdim=True)[0]\n return ellipse_shape", + "docstring": "Run forward. Args: patch: :math: Returns: torch.Tensor: ellipse_shape :math:", + "type": "method", + "file_path": "kornia\\kornia\\feature\\affine_shape.py", + "ast_data": "FunctionDef name:forward arg:self arg:patch arguments arg arg Call Assign Call Call Call Assign Call Call Call Call Call Call Call Call Call Assign Call Compare Call Call Compare Assign Call Call Call Call Assign Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "use_solver", + "source_code": "def use_solver(**kwargs):\n global useUmfpack\n if 'useUmfpack' in kwargs:\n useUmfpack.u = kwargs['useUmfpack']\n if useUmfpack.u and 'assumeSortedIndices' in kwargs:\n umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])", + "docstring": "Select default sparse direct solver to be used. Parameters ---------- useUmfpack : bool, optional Use UMFPACK [1]_, [2]_, [3]_, [4]_. over SuperLU. Has effect only if `` to gain some speed. References ---------- .. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern multifrontal method with a column pre-ordering strategy, ACM Trans. on Mathematical Software, 30(2), 2004, pp. 196--199. .. [2] T. A. 
Davis, A column pre-ordering strategy for the unsymmetric-pattern multifrontal method, ACM Trans. on Mathematical Software, 30(2), 2004, pp. 165--195. .. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal method for unsymmetric sparse matrices, ACM Trans. on Mathematical Software, 25(1), 1999, pp. 1--19. .. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal method for sparse LU factorization, SIAM J. Matrix Analysis and Computations, 18(1), 1997, pp. 140--158. Examples -------- >>> import numpy as np >>> from scipy.sparse.linalg import use_solver, spsolve >>> from scipy.sparse import csc_array >>> R = np.random.randn(5, 5) >>> A = csc_array(R) >>> b = np.random.randn(5) >>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK >>> x = spsolve(A, b) >>> np.allclose(A.dot(x), b) True >>> use_solver(useUmfpack=True) # reset umfPack usage to default", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_dsolve\\linsolve.py", + "ast_data": "FunctionDef name:use_solver arguments arg If Compare Assign If BoolOp Compare Call" + }, + { + "library": "pytorch", + "name": "load_state_dict_from_url", + "source_code": "def load_state_dict_from_url(url: str, model_dir: Optional[str]=None, map_location: MAP_LOCATION=None, progress: bool=True, check_hash: bool=False, file_name: Optional[str]=None, weights_only: bool=False) -> dict[str, Any]:\n if os.getenv('TORCH_MODEL_ZOO'):\n warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')\n if model_dir is None:\n hub_dir = get_dir()\n model_dir = os.path.join(hub_dir, 'checkpoints')\n os.makedirs(model_dir, exist_ok=True)\n parts = urlparse(url)\n filename = os.path.basename(parts.path)\n if file_name is not None:\n filename = file_name\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file):\n sys.stdout.write(f'Downloading: \"{url}\" to {cached_file}\\n')\n hash_prefix = None\n if check_hash:\n r = HASH_REGEX.search(filename)\n hash_prefix = r.group(1) if r else None\n download_url_to_file(url, cached_file, hash_prefix, progress=progress)\n if _is_legacy_zip_format(cached_file):\n return _legacy_zip_load(cached_file, model_dir, map_location, weights_only)\n return torch.load(cached_file, map_location=map_location, weights_only=weights_only)", + "docstring": "Loads the Torch serialized object at the given URL. If downloaded file is a zip file, it will be automatically decompressed. If the object is already present in , it's deserialized and returned. The default value of `~torch.hub.get_dir``~torch.load` for more details. Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) >>> state_dict = torch.hub.load_state_dict_from_url( ... \" ... )", + "type": "function", + "file_path": "pytorch\\torch\\hub.py", + "ast_data": "FunctionDef name:load_state_dict_from_url arg:url arg:model_dir arg:map_location arg:progress arg:check_hash arg:file_name arg:weights_only arguments arg arg arg arg arg arg arg If Call Call If Compare Assign Call Assign Call Call Assign Call Assign Call If Compare Assign Assign Call If Call Call Assign If Assign Call Assign Call Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "request_is_alias", + "source_code": "def request_is_alias(item):\n if item in VALID_REQUEST_VALUES:\n return False\n return isinstance(item, str) and item.isidentifier()", + "docstring": "Check if an item is a valid alias. Values in `` are not considered aliases in this context. 
Only a string which is a valid identifier is. Parameters ---------- item : object The given item to be checked if it can be an alias. Returns ------- result : bool Whether the given item is a valid alias.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", + "ast_data": "FunctionDef name:request_is_alias arg:item arguments arg If Compare Return return:yes Return return:yes BoolOp Call Call" + }, + { + "library": "django", + "name": "login_required", + "source_code": "def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):\n actual_decorator = user_passes_test(lambda u: u.is_authenticated, login_url=login_url, redirect_field_name=redirect_field_name)\n if function:\n return actual_decorator(function)\n return actual_decorator", + "docstring": "Decorator for views that checks that the user is logged in, redirecting to the log-in page if necessary.", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\decorators.py", + "ast_data": "FunctionDef name:login_required arg:function arg:redirect_field_name arg:login_url arguments arg arg arg Assign Call arguments arg If Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "build_results", + "source_code": "def build_results(self, session, tensor_values):\n full_values = []\n assert len(self._final_fetches) == len(tensor_values)\n i = 0\n j = 0\n for is_op in self._ops:\n if is_op:\n full_values.append(None)\n else:\n if self._fetches[i].ref() in self._feed_handles:\n value = self._feed_handles[self._fetches[i].ref()].eval()\n else:\n value = self._feeds.get(self._fetches[i].ref())\n if value is None:\n value = tensor_values[j]\n j += 1\n dtype = self._fetch_handles.get(self._fetches[i].ref())\n if dtype:\n full_values.append(session_ops.TensorHandle(value, dtype, session))\n else:\n full_values.append(value)\n i += 1\n assert j == len(tensor_values)\n return self._fetch_mapper.build_results(full_values)", + "docstring": "Build results matching the original fetch shape. must be a list of the same length as the one returned by , and holding the requested fetch values. This method builds a struct with the same shape as the original passed to the constructor, in which the fetches are replaced by their fetched value. Args: session: The enclosing session. Used for tensor handles. tensor_values: List of values matching the list returned by fetches(). Returns: A structure of the same shape as the original argument but containing tensors or None (for fetched ops).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:build_results arg:self arg:session arg:tensor_values arguments arg arg arg Assign Compare Call Call Assign Assign For If Call If Compare Call Assign Call Call Assign Call Call If Compare Assign Assign Call Call If Call Call Call Compare Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "_singular_range_check", + "source_code": "def _singular_range_check(ranged_factor: Tensor, name: str, bounds: Optional[Tuple[float, float]]=None, skip_none: bool=False, mode: str='2d') -> None:\n if mode == '2d':\n dim_size = 2\n elif mode == '3d':\n dim_size = 3\n else:\n raise ValueError(f\"'mode' shall be either 2d or 3d. 
Got {mode}\")\n if skip_none and ranged_factor is None:\n return\n if bounds is None:\n bounds = (float('-inf'), float('inf'))\n if ranged_factor.dim() == 1 and len(ranged_factor) == dim_size:\n for f in ranged_factor:\n if not bounds[0] <= f <= bounds[1]:\n raise ValueError(f'{name} out of bounds. Expected inside {bounds}, got {ranged_factor}.')\n else:\n raise TypeError(f'{name} should be a float number or a tuple with length {dim_size} whose values between {bounds}.Got {ranged_factor}')", + "docstring": "Check if bounds[0] <= ranged_factor[0] <= bounds[1] and bounds[0] <= ranged_factor[1] <= bounds[1].", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\utils\\param_validation.py", + "ast_data": "FunctionDef name:_singular_range_check arg:ranged_factor arg:name arg:bounds arg:skip_none arg:mode arguments arg arg arg arg arg If Compare Assign If Compare Assign Raise Call If BoolOp Compare Return return:no If Compare Assign Call Call If BoolOp Compare Call Compare Call For If Compare Raise Call Raise Call" + }, + { + "library": "pytorch", + "name": "check", + "source_code": "@deprecated('`torch._prims_common.check` is deprecated and will be removed in the future. Please use `torch._check*` functions instead.', category=FutureWarning)\ndef check(b: bool, s: Callable[[], str], exc_type: type[Exception]=RuntimeError) -> None:\n torch._check_with(exc_type, b, s)", + "docstring": "Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails. Error message is a callable producing a string (to avoid wasting time string formatting in non-error case, and also to make it easier for torchdynamo to trace.) .. note:: This function is planned for removal in the future. Please use functions instead.", + "type": "function", + "file_path": "pytorch\\torch\\_prims_common\\__init__.py", + "ast_data": "FunctionDef name:check arg:b arg:s arg:exc_type arguments arg arg arg Call Call" + }, + { + "library": "pandas", + "name": "rec_array_to_mgr", + "source_code": "def rec_array_to_mgr(data: np.rec.recarray | np.ndarray, index, columns, dtype: DtypeObj | None, copy: bool) -> Manager:\n fdata = ma.getdata(data)\n if index is None:\n index = default_index(len(fdata))\n else:\n index = ensure_index(index)\n if columns is not None:\n columns = ensure_index(columns)\n arrays, arr_columns = to_arrays(fdata, columns)\n arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index))\n if columns is None:\n columns = arr_columns\n mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype)\n if copy:\n mgr = mgr.copy()\n return mgr", + "docstring": "Extract from a masked rec array and create the manager.", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\construction.py", + "ast_data": "FunctionDef name:rec_array_to_mgr arg:data arg:index arg:columns arg:dtype arg:copy arguments arg arg arg arg arg Assign Call If Compare Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Call Call If Compare Assign Assign Call If Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y=None):\n xp, _ = get_namespace(X)\n return float(xp.mean(self.score_samples(X)))", + "docstring": "Return the average log-likelihood of all samples. See. \"Pattern Recognition and Machine Learning\" by C. Bishop, 12.2.1 p. 574 or Parameters ---------- X : array-like of shape (n_samples, n_features) The data. y : Ignored Ignored. 
Returns ------- ll : float Average log-likelihood of the samples under the current model.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_pca.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "scrapy", + "name": "StopDownload", + "source_code": "class StopDownload(Exception):\n\n def __init__(self, *, fail: bool=True):\n super().__init__()\n self.fail = fail", + "docstring": "Stop the download of the body for a given response. The 'fail' boolean parameter indicates whether or not the resulting partial response should be handled by the request errback. Note that 'fail' is a keyword-only argument.", + "type": "class", + "file_path": "scrapy\\scrapy\\exceptions.py", + "ast_data": "ClassDef name:StopDownload FunctionDef name:__init__ arg:self arguments arg arg Call Call Assign" + }, + { + "library": "pandas", + "name": "get_compressed_ids", + "source_code": "def get_compressed_ids(labels, sizes: Shape) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]:\n ids = get_group_index(labels, sizes, sort=True, xnull=False)\n return compress_group_index(ids, sort=True)", + "docstring": "Group_index is offsets into cartesian product of all possible labels. This space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids). Parameters ---------- labels : list of label arrays sizes : tuple[int] of size of the levels Returns ------- np.ndarray[np.intp] comp_ids np.ndarray[np.int64] obs_group_ids", + "type": "function", + "file_path": "pandas\\pandas\\core\\sorting.py", + "ast_data": "FunctionDef name:get_compressed_ids arg:labels arg:sizes arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "escape", + "source_code": "def escape(self, s: str) -> str:\n s = s.replace('@', '@@')\n s = s.replace('{', '@{')\n s = s.replace('}', '@}')\n s = s.replace('``', '`@w{`}')\n s = s.replace(\"''\", \"'@w{'}\")\n return s", + "docstring": "Return a string with Texinfo command characters escaped.", + "type": "method", + "file_path": "sphinx\\sphinx\\writers\\texinfo.py", + "ast_data": "FunctionDef name:escape arg:self arg:s arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "fp16_compress_hook", + "source_code": "def fp16_compress_hook(process_group: dist.ProcessGroup, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n return _compress_hook(torch.float16, process_group, bucket)", + "docstring": "Compress by casting ``). Example:: >>> # xdoctest: +SKIP >>> ddp_model.register_comm_hook(process_group, fp16_compress_hook)", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\default_hooks.py", + "ast_data": "FunctionDef name:fp16_compress_hook arg:process_group arg:bucket arguments arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "equal", + "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef equal(x1, x2):\n return compare_chararrays(x1, x2, '==', True)", + "docstring": "Return (x1 == x2) element-wise. Unlike , this comparison is performed by first stripping whitespace characters from the end of the string. This behavior is provided for backward-compatibility with numarray. Parameters ---------- x1, x2 : array_like of str or unicode Input arrays of the same shape. 
Returns ------- out : ndarray Output array of bools. Examples -------- >>> import numpy as np >>> y = \"aa \" >>> x = \"aa\" >>> np.char.equal(x, y) array(True) See Also -------- not_equal, greater_equal, less_equal, greater, less", + "type": "function", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:equal arg:x1 arg:x2 arguments arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "@available_if(_can_transform)\ndef transform(self, X, **params):\n with _raise_or_warn_if_not_fitted(self):\n _raise_for_params(params, self, 'transform')\n routed_params = process_routing(self, 'transform', **params)\n Xt = X\n for _, name, transform in self._iter():\n Xt = transform.transform(Xt, **routed_params[name].transform)\n return Xt", + "docstring": "Transform the data, and apply with the final estimator. Call of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls method. Only valid if the final estimator implements . This also works where final estimator is in which case all prior transformations are applied. Parameters ---------- X : iterable Data to transform. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if . See :ref: for more details. Returns ------- Xt : ndarray of shape (n_samples, n_transformed_features) Transformed data.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg arg With Call Call Assign Call Assign For Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "tiecorrect", + "source_code": "def tiecorrect(rankvals):\n arr = np.sort(rankvals)\n idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]\n cnt = np.diff(idx).astype(np.float64)\n size = np.float64(arr.size)\n return 1.0 if size < 2 else 1.0 - (cnt ** 3 - cnt).sum() / (size ** 3 - size)", + "docstring": "Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests. Parameters ---------- rankvals : array_like A 1-D sequence of ranks. Typically this will be the array returned by . Returns ------- factor : float Correction factor for U or H. See Also -------- rankdata : Assign ranks to the data mannwhitneyu : Mann-Whitney rank test kruskal : Kruskal-Wallis H test References ---------- .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences. New York: McGraw-Hill. Examples -------- >>> from scipy.stats import tiecorrect, rankdata >>> tiecorrect([1, 2.5, 2.5, 4]) 0.9 >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4]) >>> ranks array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. 
, 5.5]) >>> tiecorrect(ranks) 0.9833333333333333", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:tiecorrect arg:rankvals arguments arg Assign Call Assign Call Compare Assign Call Call Assign Call Return return:yes Compare Call" + }, + { + "library": "matplotlib", + "name": "minorticks_on", + "source_code": "def minorticks_on(self):\n self.ax.minorticks_on()\n self._short_axis().set_minor_locator(ticker.NullLocator())", + "docstring": "Turn on colorbar minor ticks.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", + "ast_data": "FunctionDef name:minorticks_on arg:self arguments arg Call Call Call Call" + }, + { + "library": "pygame", + "name": "add_internal", + "source_code": "def add_internal(self, group):\n self.__g.add(group)", + "docstring": "For adding this sprite to a group internally. :param group: The group we are adding to.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:add_internal arg:self arg:group arguments arg arg Call" + }, + { + "library": "scipy", + "name": "Problem03", + "source_code": "class Problem03(Benchmark):\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n self._bounds = [(-10, 10)]\n self.global_optimum = -6.7745761\n self.fglob = -12.03124\n\n def fun(self, x, *args):\n self.nfev += 1\n x = x[0]\n y = 0.0\n for k in range(1, 6):\n y += k * sin((k + 1) * x + k)\n return -y", + "docstring": "Univariate Problem03 objective function. This class defines the Univariate Problem03 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem03}}(x) = - \\sum_{k=1}^6 k \\sin[(k+1)x+k] Bound constraints: :math: .. figure:: figures/Problem03.png :alt: Univariate Problem03 function :align: center **Univariate Problem03 function** *Global optimum*: :math: for :math:", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", + "ast_data": "ClassDef name:Problem03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign For Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, loss=None, predictions=None, metrics=None):\n if loss is not None:\n loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)\n self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)\n if predictions is not None:\n pred_dict = self._wrap_and_check_outputs(predictions, self.PREDICTIONS_NAME)\n self._predictions = self._prefix_output_keys(pred_dict, self.PREDICTIONS_NAME)\n if metrics is not None:\n self._metrics = self._wrap_and_check_metrics(metrics)", + "docstring": "Constructor for SupervisedOutput (ie, Train or Eval output). Args: loss: dict of Tensors or single Tensor representing calculated loss. predictions: dict of Tensors or single Tensor representing model predictions. metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. 
Raises: ValueError: if any of the outputs' dict keys are not strings or tuples of strings or the values are not Tensors (or Operations in the case of update_op).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:loss arg:predictions arg:metrics arguments arg arg arg arg If Compare Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Call" + }, + { + "library": "matplotlib", + "name": "new_figure_manager", + "source_code": "@classmethod\ndef new_figure_manager(cls, num, *args, **kwargs):\n from matplotlib.figure import Figure\n fig_cls = kwargs.pop('FigureClass', Figure)\n fig = fig_cls(*args, **kwargs)\n return cls.new_figure_manager_given_figure(num, fig)", + "docstring": "Create a new figure manager instance.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:new_figure_manager arg:cls arg:num arguments arg arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "normkey", + "source_code": "def normkey(self, key: AnyStr) -> AnyStr:\n return key.lower()", + "docstring": "Method to normalize dictionary key access", + "type": "method", + "file_path": "scrapy\\scrapy\\utils\\datatypes.py", + "ast_data": "FunctionDef name:normkey arg:self arg:key arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "list_local_devices", + "source_code": "def list_local_devices(session_config=None):\n\n def _convert(pb_str):\n m = device_attributes_pb2.DeviceAttributes()\n m.ParseFromString(pb_str)\n return m\n serialized_config = None\n if session_config is not None:\n serialized_config = session_config.SerializeToString()\n return [_convert(s) for s in _pywrap_device_lib.list_devices(serialized_config)]", + "docstring": "List the available devices available in the local process. Args: session_config: a session config proto or None to use the default config. Returns: A list of protocol buffers.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\client\\device_lib.py", + "ast_data": "FunctionDef name:list_local_devices arg:session_config arguments arg FunctionDef name:_convert arg:pb_str arguments arg Assign Call Call Return return:yes Assign If Compare Assign Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n self._fit(X)\n return self", + "docstring": "Fit the model with X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Ignored. Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_pca.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "django", + "name": "wait_for_apps_ready", + "source_code": "def wait_for_apps_ready(self, app_reg, django_main_thread):\n while django_main_thread.is_alive():\n if app_reg.ready_event.wait(timeout=0.1):\n return True\n else:\n logger.debug('Main Django thread has terminated before apps are ready.')\n return False", + "docstring": "Wait until Django reports that the apps have been loaded. 
If the given thread has terminated before the apps are ready, then a SyntaxError or other non-recoverable error has been raised. In that case, stop waiting for the apps_ready event and continue processing. Return True if the thread is alive and the ready event has been triggered, or False if the thread is terminated while waiting for the event.", + "type": "method", + "file_path": "django\\django\\utils\\autoreload.py", + "ast_data": "FunctionDef name:wait_for_apps_ready arg:self arg:app_reg arg:django_main_thread arguments arg arg arg While Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "SwappableTuple", + "source_code": "class SwappableTuple(tuple):\n\n def __new__(cls, value, setting):\n self = tuple.__new__(cls, value)\n self.setting = setting\n return self", + "docstring": "Subclass of tuple so Django can tell this was originally a swappable dependency when it reads the migration file.", + "type": "class", + "file_path": "django\\django\\db\\migrations\\migration.py", + "ast_data": "ClassDef name:SwappableTuple FunctionDef name:__new__ arg:cls arg:value arg:setting arguments arg arg arg Assign Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "nunique", + "source_code": "def nunique(x: Array, /, *, xp: ModuleType | None=None) -> Array:\n if xp is None:\n xp = array_namespace(x)\n if is_jax_array(x):\n _, counts = xp.unique_counts(x, size=_compat.size(x))\n return xp.astype(counts, xp.bool).sum()\n _, counts = xp.unique_counts(x)\n n = _compat.size(counts)\n if n is None:\n return xp.astype(counts, xp.bool).sum()\n return xp.asarray(n, device=_compat.device(x))", + "docstring": "Count the number of unique elements in an array. Compatible with JAX and Dask, whose laziness would be otherwise problematic. Parameters ---------- x : Array Input array. xp : array_namespace, optional The standard-compatible namespace for . Default: infer. Returns ------- array: 0-dimensional integer array The number of unique elements in . It can be lazy.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py", + "ast_data": "FunctionDef name:nunique arguments arg arg If Compare Assign Call If Call Assign Call Call Return return:yes Call Call Assign Call Assign Call If Compare Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "object_name", + "source_code": "def object_name(self) -> str:\n return None", + "docstring": "Returns the local name of the object being restored. 
Override this method when the local name of object is different than in the checkpoint.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py", + "ast_data": "FunctionDef name:object_name arg:self arguments arg Return return:no" + }, + { + "library": "pytorch", + "name": "load", + "source_code": "@_dcp_method_logger(log_exceptions=True)\n@_api_bc_check\ndef load(state_dict: dict[str, Any], *, checkpoint_id: Union[str, os.PathLike, None]=None, storage_reader: Optional[StorageReader]=None, planner: Optional[LoadPlanner]=None, process_group: Optional[dist.ProcessGroup]=None, no_dist: bool=False) -> None:\n no_dist = no_dist or not dist.is_available() or (not dist.is_initialized())\n if no_dist:\n warnings.warn('torch.distributed is disabled, unavailable or uninitialized, assuming the intent is to load in a single process.')\n with _profile():\n storage_reader = cast(StorageReader, _storage_setup(storage_reader, checkpoint_id, reader=True))\n keys = sorted(state_dict.keys())\n statetful_sd = {}\n for key in keys:\n if key not in state_dict:\n continue\n elem = state_dict[key]\n statetful_sd[key] = elem.state_dict() if isinstance(elem, Stateful) else elem\n _load_state_dict(state_dict=statetful_sd, storage_reader=storage_reader, process_group=process_group, no_dist=no_dist, planner=planner)\n for key in keys:\n if key not in state_dict:\n continue\n elem = state_dict[key]\n if isinstance(elem, Stateful):\n elem.load_state_dict(statetful_sd[key])\n else:\n state_dict[key] = statetful_sd[key]", + "docstring": "Load a checkpoint into a distributed state dict in SPMD style. Each rank must have the same keys in their `state_dictShardedTensorDTensortorch.load()load_state_dict`.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict_loader.py", + "ast_data": "FunctionDef name:load arg:state_dict arguments arg arg arg arg arg arg Assign BoolOp Call Call If Call With Call Assign Call Call Assign Call Call Assign For If Compare Assign Assign Call Call Call For If Compare Assign If Call Call Assign Call" + }, + { + "library": "matplotlib", + "name": "HandlerPathCollection", + "source_code": "class HandlerPathCollection(HandlerRegularPolyCollection):\n\n def create_collection(self, orig_handle, sizes, offsets, offset_transform):\n return type(orig_handle)([orig_handle.get_paths()[0]], sizes=sizes, offsets=offsets, offset_transform=offset_transform)", + "docstring": "Handler for \\s, which are used by .", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py", + "ast_data": "ClassDef name:HandlerPathCollection FunctionDef name:create_collection arg:self arg:orig_handle arg:sizes arg:offsets arg:offset_transform arguments arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "diag", + "source_code": "@abstractmethod\ndef diag(self, X):\n pass", + "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. 
Parameters ---------- X : array-like of shape (n_samples,) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg" + }, + { + "library": "django", + "name": "__len__", + "source_code": "def __len__(self):\n if self.empty:\n return 0\n if self.hasz:\n return 3\n else:\n return 2", + "docstring": "Return the number of dimensions for this Point (either 0, 2 or 3).", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\point.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg If Return return:yes If Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "flatten_parameters", + "source_code": "def flatten_parameters(self) -> None:\n if len(self._flat_weights) != len(self._flat_weights_names):\n return\n for w in self._flat_weights:\n if not isinstance(w, Tensor):\n return\n first_fw = self._flat_weights[0]\n dtype = first_fw.dtype\n for fw in self._flat_weights:\n if not isinstance(fw, Tensor) or not fw.dtype == dtype or (not fw.is_cuda) or (not torch.backends.cudnn.is_acceptable(fw)):\n return\n unique_data_ptrs = {p.data_ptr() for p in self._flat_weights}\n if len(unique_data_ptrs) != len(self._flat_weights):\n return\n with torch.cuda.device_of(first_fw):\n import torch.backends.cudnn.rnn as rnn\n with torch.no_grad():\n if torch._use_cudnn_rnn_flatten_weight():\n num_weights = 4 if self.bias else 2\n if self.proj_size > 0:\n num_weights += 1\n torch._cudnn_rnn_flatten_weight(self._flat_weights, num_weights, self.input_size, rnn.get_cudnn_mode(self.mode), self.hidden_size, self.proj_size, self.num_layers, self.batch_first, bool(self.bidirectional))", + "docstring": "Reset parameter data pointer so that they can use faster code paths. Right now, this works only if the module is on the GPU and cuDNN is enabled. 
Otherwise, it's a no-op.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\rnn.py", + "ast_data": "FunctionDef name:flatten_parameters arg:self arguments arg If Compare Call Call Return return:no For If Call Return return:no Assign Assign For If BoolOp Call Compare Call Return return:no Assign Call If Compare Call Call Return return:no With Call With Call If Call Assign If Compare Call Call Call" + }, + { + "library": "tensorflow", + "name": "Tape", + "source_code": "class Tape(object):\n __slots__ = ['_tape']\n\n def __init__(self, tape):\n self._tape = tape\n\n def watched_variables(self):\n return pywrap_tfe.TFE_Py_TapeWatchedVariables(self._tape)", + "docstring": "Represents a gradient propagation trace.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py", + "ast_data": "ClassDef name:Tape Assign FunctionDef name:__init__ arg:self arg:tape arguments arg arg Assign FunctionDef name:watched_variables arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "no_regularizer", + "source_code": "@tf_export(v1=['no_regularizer'])\ndef no_regularizer(_):\n return None", + "docstring": "Use this function to prevent regularization of variables.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", + "ast_data": "FunctionDef name:no_regularizer arg:_ arguments arg Return return:no Call" + }, + { + "library": "tensorflow", + "name": "run", + "source_code": "def run(self, fn, args=(), kwargs=None, options=None):\n return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)", + "docstring": "Run on each replica, with the given arguments. In , is simply called within a device scope for the given device, with the provided arguments. Args: fn: The function to run. The output must be a of s. args: (Optional) Positional arguments to . kwargs: (Optional) Keyword arguments to . options: (Optional) An instance of specifying the options to run . Returns: Return value from running .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py", + "ast_data": "FunctionDef name:run arg:self arg:fn arg:args arg:kwargs arg:options arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "scrapy", + "name": "parse_cachecontrol", + "source_code": "def parse_cachecontrol(header: bytes) -> dict[bytes, bytes | None]:\n directives = {}\n for directive in header.split(b','):\n key, sep, val = directive.strip().partition(b'=')\n if key:\n directives[key.lower()] = val if sep else None\n return directives", + "docstring": "Parse Cache-Control header >>> parse_cachecontrol(b'public, max-age=3600') == {b'public': None, ... 
b'max-age': b'3600'} True >>> parse_cachecontrol(b'') == {} True", + "type": "function", + "file_path": "scrapy\\scrapy\\extensions\\httpcache.py", + "ast_data": "FunctionDef name:parse_cachecontrol arg:header arguments arg Assign For Call Assign Call Call If Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, offsetbox, xy, xybox=None, xycoords='data', boxcoords=None, *, frameon=True, pad=0.4, annotation_clip=None, box_alignment=(0.5, 0.5), bboxprops=None, arrowprops=None, fontsize=None, **kwargs):\n martist.Artist.__init__(self)\n mtext._AnnotationBase.__init__(self, xy, xycoords=xycoords, annotation_clip=annotation_clip)\n self.offsetbox = offsetbox\n self.arrowprops = arrowprops.copy() if arrowprops is not None else None\n self.set_fontsize(fontsize)\n self.xybox = xybox if xybox is not None else xy\n self.boxcoords = boxcoords if boxcoords is not None else xycoords\n self._box_alignment = box_alignment\n if arrowprops is not None:\n self._arrow_relpos = self.arrowprops.pop('relpos', (0.5, 0.5))\n self.arrow_patch = FancyArrowPatch((0, 0), (1, 1), **self.arrowprops)\n else:\n self._arrow_relpos = None\n self.arrow_patch = None\n self.patch = FancyBboxPatch(xy=(0.0, 0.0), width=1.0, height=1.0, facecolor='w', edgecolor='k', mutation_scale=self.prop.get_size_in_points(), snap=True, visible=frameon)\n self.patch.set_boxstyle('square', pad=pad)\n if bboxprops:\n self.patch.set(**bboxprops)\n self._internal_update(kwargs)", + "docstring": "Parameters ---------- offsetbox : xy : (float, float) The point *(x, y)* to annotate. The coordinate system is determined by *xycoords*. xybox : (float, float), default: *xy* The position *(x, y)* to place the text at. The coordinate system is determined by *boxcoords*. xycoords : single or two-tuple of str or or or callable, default: 'data' The coordinate system that *xy* is given in. See the parameter *xycoords* in for a detailed description. boxcoords : single or two-tuple of str or or or callable, default: value of *xycoords* The coordinate system that *xybox* is given in. See the parameter *textcoords* in for a detailed description. frameon : bool, default: True By default, the text is surrounded by a white (accessible as the `.AnnotationBbox.FancyBboxPatch.Annotation.FancyBboxPatch.Textlegend.fontsize.Text.set_fontsizeAnnotationBbox.AnnotationBbox.set` for a list.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:offsetbox arg:xy arg:xybox arg:xycoords arg:boxcoords arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign Compare Call Call Assign Compare Assign Compare Assign If Compare Assign Call Assign Call Assign Assign Assign Call Call Call If Call Call" + }, + { + "library": "django", + "name": "y", + "source_code": "def y(self):\n return '%02d' % (self.data.year % 100)", + "docstring": "Year, 2 digits with leading zeros; e.g. 
'99'.", + "type": "method", + "file_path": "django\\django\\utils\\dateformat.py", + "ast_data": "FunctionDef name:y arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "validate_grant_types", + "source_code": "def validate_grant_types(self):\n self._validate_claim_value('grant_types')", + "docstring": "Array of OAuth 2.0 grant type strings that the client can use at the token endpoint.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py", + "ast_data": "FunctionDef name:validate_grant_types arg:self arguments arg Call" + }, + { + "library": "pygame", + "name": "render", + "source_code": "def render(self, text, antialias, color, background=None):\n if text is None:\n text = ''\n if isinstance(text, str) and self.__unull in text:\n raise ValueError('A null character was found in the text')\n if isinstance(text, bytes) and self.__bnull in text:\n raise ValueError('A null character was found in the text')\n save_antialiased = self.antialiased\n self.antialiased = bool(antialias)\n try:\n s, _ = super().render(text, color, background)\n return s\n finally:\n self.antialiased = save_antialiased", + "docstring": "render(text, antialias, color, background=None) -> Surface draw text on a new Surface", + "type": "method", + "file_path": "pygame\\src_py\\ftfont.py", + "ast_data": "FunctionDef name:render arg:self arg:text arg:antialias arg:color arg:background arguments arg arg arg arg arg If Compare Assign If BoolOp Call Compare Raise Call If BoolOp Call Compare Raise Call Assign Assign Call Try Assign Call Call Return return:yes Assign" + }, + { + "library": "django", + "name": "touch", + "source_code": "def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):\n raise NotImplementedError('subclasses of BaseCache must provide a touch() method')", + "docstring": "Update the key's expiry time using timeout. Return True if successful or False if the key does not exist.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:touch arg:self arg:key arg:timeout arg:version arguments arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_tf_tensor_list_set_item", + "source_code": "def _tf_tensor_list_set_item(target, i, x):\n return list_ops.tensor_list_set_item(target, i, x)", + "docstring": "Overload of set_item that stages a Tensor list update.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py", + "ast_data": "FunctionDef name:_tf_tensor_list_set_item arg:target arg:i arg:x arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self, x, pos=None):\n raise NotImplementedError('Derived must override')", + "docstring": "Return the format for tick value *x* at position pos. 
`` indicates an unspecified location.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Raise Call" + }, + { + "library": "pandas", + "name": "has_dropped_na", + "source_code": "@final\n@cache_readonly\ndef has_dropped_na(self) -> bool:\n return bool((self.ids < 0).any())", + "docstring": "Whether grouper has null value(s) that are dropped.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\ops.py", + "ast_data": "FunctionDef name:has_dropped_na arg:self arguments arg Return return:yes Call Call Compare" + }, + { + "library": "scrapy", + "name": "media_downloaded", + "source_code": "@abstractmethod\ndef media_downloaded(self, response: Response, request: Request, info: SpiderInfo, *, item: Any=None) -> FileInfo:\n raise NotImplementedError", + "docstring": "Handler for success downloads", + "type": "method", + "file_path": "scrapy\\scrapy\\pipelines\\media.py", + "ast_data": "FunctionDef name:media_downloaded arg:self arg:response arg:request arg:info arguments arg arg arg arg arg Raise" + }, + { + "library": "scipy", + "name": "mquantiles_cimj", + "source_code": "def mquantiles_cimj(data, prob=(0.25, 0.5, 0.75), alpha=0.05, axis=None):\n alpha = min(alpha, 1 - alpha)\n z = norm.ppf(1 - alpha / 2.0)\n xq = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis)\n smj = mjci(data, prob, axis=axis)\n return (xq - z * smj, xq + z * smj)", + "docstring": "Computes the alpha confidence interval for the selected quantiles of the data, with Maritz-Jarrett estimators. Parameters ---------- data : ndarray Data array. prob : sequence, optional Sequence of quantiles to compute. alpha : float, optional Confidence level of the intervals. axis : int or None, optional Axis along which to compute the quantiles. If None, use a flattened array. Returns ------- ci_lower : ndarray The lower boundaries of the confidence interval. Of the same length as . ci_upper : ndarray The upper boundaries of the confidence interval. Of the same length as .", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_extras.py", + "ast_data": "FunctionDef name:mquantiles_cimj arg:data arg:prob arg:alpha arg:axis arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "_replace_coerce", + "source_code": "@final\ndef _replace_coerce(self, to_replace, value, mask: npt.NDArray[np.bool_], inplace: bool=True, regex: bool=False) -> list[Block]:\n if should_use_regex(regex, to_replace):\n return self._replace_regex(to_replace, value, inplace=inplace, mask=mask)\n else:\n if value is None:\n if mask.any():\n has_ref = self.refs.has_reference()\n nb = self.astype(np.dtype(object))\n if not inplace:\n nb = nb.copy()\n elif inplace and has_ref and nb.refs.has_reference():\n nb = nb.copy()\n putmask_inplace(nb.values, mask, value)\n return [nb]\n return [self.copy(deep=False)]\n return self.replace(to_replace=to_replace, value=value, inplace=inplace, mask=mask)", + "docstring": "Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. 
Returns ------- List[Block]", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:_replace_coerce arg:self arg:to_replace arg:value arg:mask arg:inplace arg:regex arguments arg arg arg arg arg arg If Call Return return:yes Call If Compare If Call Assign Call Assign Call Call If Assign Call If BoolOp Call Assign Call Call Return return:yes Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "device", + "source_code": "class device:\n\n def __init__(self, device: Any):\n self.idx = _get_device_index(device, optional=True)\n self.prev_idx = -1\n\n def __enter__(self):\n self.prev_idx = torch.cuda._exchange_device(self.idx)\n\n def __exit__(self, type: Any, value: Any, traceback: Any):\n self.idx = torch.cuda._maybe_exchange_device(self.prev_idx)\n return False", + "docstring": "Context-manager that changes the selected device. Args: device (torch.device or int): device index to select. It's a no-op if this argument is a negative integer or ``.", + "type": "class", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "ClassDef name:device FunctionDef name:__init__ arg:self arg:device arguments arg arg Assign Call Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "gray", + "source_code": "def gray() -> None:\n set_cmap('gray')", + "docstring": "Set the colormap to 'gray'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:gray arguments Call" + }, + { + "library": "scipy", + "name": "describe", + "source_code": "def describe(a, axis=0, ddof=0, bias=True):\n a, axis = _chk_asarray(a, axis)\n n = a.count(axis)\n mm = (ma.minimum.reduce(a, axis=axis), ma.maximum.reduce(a, axis=axis))\n m = a.mean(axis)\n v = a.var(axis, ddof=ddof)\n sk = skew(a, axis, bias=bias)\n kurt = kurtosis(a, axis, bias=bias)\n return DescribeResult(n, mm, m, v, sk, kurt)", + "docstring": "Computes several descriptive statistics of the passed array. Parameters ---------- a : array_like Data array axis : int or None, optional Axis along which to calculate statistics. Default 0. If None, compute over the whole array . ddof : int, optional degree of freedom (default 0); note that default ddof is different from the same routine in stats.describe bias : bool, optional If False, then the skewness and kurtosis calculations are corrected for statistical bias. 
Returns ------- nobs : int (size of the data (discarding missing values) minmax : (int, int) min, max mean : float arithmetic mean variance : float unbiased variance skewness : float biased skewness kurtosis : float biased kurtosis Examples -------- >>> import numpy as np >>> from scipy.stats.mstats import describe >>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1]) >>> describe(ma) DescribeResult(nobs=np.int64(3), minmax=(masked_array(data=0, mask=False, fill_value=999999), masked_array(data=2, mask=False, fill_value=999999)), mean=np.float64(1.0), variance=np.float64(0.6666666666666666), skewness=masked_array(data=0., mask=False, fill_value=1e+20), kurtosis=np.float64(-1.5))", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:describe arg:a arg:axis arg:ddof arg:bias arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_num_samples_or_steps", + "source_code": "def _get_num_samples_or_steps(data, steps_per_epoch):\n flat_inputs = nest.flatten(data)\n if hasattr(flat_inputs[0], 'shape'):\n return (int(flat_inputs[0].shape[0]), False)\n return (steps_per_epoch, True)", + "docstring": "Returns number of samples or steps, and whether to use steps count mode.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_generator_v1.py", + "ast_data": "FunctionDef name:_get_num_samples_or_steps arg:data arg:steps_per_epoch arguments arg arg Assign Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_tensors_from_tensor_names", + "source_code": "def get_tensors_from_tensor_names(graph, tensor_names):\n tensor_name_to_tensor = {}\n for op in graph.get_operations():\n for tensor in op.values():\n tensor_name_to_tensor[get_tensor_name(tensor)] = tensor\n tensors = []\n invalid_tensors = []\n for name in tensor_names:\n if not isinstance(name, str):\n raise ValueError(\"Invalid type for a tensor name in the provided graph. Expected type for a tensor name is 'str', instead got type '{}' for tensor name '{}'\".format(type(name), name))\n tensor = tensor_name_to_tensor.get(name)\n if tensor is None:\n invalid_tensors.append(name)\n else:\n tensors.append(tensor)\n if invalid_tensors:\n raise ValueError(\"Invalid tensors '{}' were found.\".format(','.join(invalid_tensors)))\n return tensors", + "docstring": "Gets the Tensors associated with the in the provided graph. Args: graph: TensorFlow Graph. tensor_names: List of strings that represent names of tensors in the graph. Returns: A list of Tensor objects in the same order the names are provided. 
Raises: ValueError: tensor_names contains an invalid tensor name.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", + "ast_data": "FunctionDef name:get_tensors_from_tensor_names arg:graph arg:tensor_names arguments arg arg Assign For Call For Call Assign Call Assign Assign For If Call Raise Call Call Call Assign Call If Compare Call Call If Raise Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "remove", + "source_code": "def remove(self, a):\n self._mapping.pop(a, {a}).remove(a)\n self._ordering.pop(a, None)", + "docstring": "Remove *a* from the grouper, doing nothing if it is not there.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:remove arg:self arg:a arguments arg arg Call Call Call" + }, + { + "library": "django", + "name": "decode", + "source_code": "def decode(self, encoded):\n raise NotImplementedError('subclasses of BasePasswordHasher must provide a decode() method.')", + "docstring": "Return a decoded database value. The result is a dictionary and should contain , , and . Extra keys can be algorithm specific like or .", + "type": "method", + "file_path": "django\\django\\contrib\\auth\\hashers.py", + "ast_data": "FunctionDef name:decode arg:self arg:encoded arguments arg arg Raise Call" + }, + { + "library": "scipy", + "name": "friedmanchisquare", + "source_code": "def friedmanchisquare(*args):\n data = argstoarray(*args).astype(float)\n k = len(data)\n if k < 3:\n raise ValueError(f'Less than 3 groups ({k}): the Friedman test is NOT appropriate.')\n ranked = ma.masked_values(rankdata(data, axis=0), 0)\n if ranked._mask is not nomask:\n ranked = ma.mask_cols(ranked)\n ranked = ranked.compressed().reshape(k, -1).view(ndarray)\n else:\n ranked = ranked._data\n k, n = ranked.shape\n repeats = [find_repeats(row) for row in ranked.T]\n ties = np.array([y for x, y in repeats if x.size > 0])\n tie_correction = 1 - (ties ** 3 - ties).sum() / float(n * (k ** 3 - k))\n ssbg = np.sum((ranked.sum(-1) - n * (k + 1) / 2.0) ** 2)\n chisq = ssbg * 12.0 / (n * k * (k + 1)) * 1.0 / tie_correction\n return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))", + "docstring": "Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA. This function calculates the Friedman Chi-square test for repeated measures and returns the result, along with the associated probability value. Each input is considered a given group. Ideally, the number of treatments among each group should be equal. If this is not the case, only the first n treatments are taken into account, where n is the number of treatments of the smallest group. If a group has some missing values, the corresponding treatments are masked in the other groups. The test statistic is corrected for ties. Masked values in one group are propagated to the other groups. Returns ------- statistic : float the test statistic. 
pvalue : float the associated p-value.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:friedmanchisquare arguments arg Assign Call Call Assign Call If Compare Raise Call Assign Call Call If Compare Assign Call Assign Call Call Call Assign Assign Assign Call Assign Call Compare Assign Call Call Assign Call Call Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "summary_scope", + "source_code": "@contextlib.contextmanager\ndef summary_scope(name, family=None, default_name=None, values=None):\n name = clean_tag(name)\n family = clean_tag(family)\n scope_base_name = name if family is None else '{}/{}'.format(family, name)\n with ops.name_scope(scope_base_name, default_name, values, skip_on_eager=False) as scope:\n if family is None:\n tag = scope.rstrip('/')\n else:\n tag = '{}/{}'.format(family, scope.rstrip('/'))\n yield (tag, scope)", + "docstring": "Enters a scope used for the summary and yields both the name and tag. To ensure that the summary tag name is always unique, we create a name scope based on and use the full scope name in the tag. If is set, then the tag name will be '/', where is . This ensures that is always the prefix of the tag (and unmodified), while ensuring the scope respects the outer scope from this summary was created. Args: name: A name for the generated summary node. family: Optional; if provided, used as the prefix of the summary tag name. default_name: Optional; if provided, used as default name of the summary. values: Optional; passed as parameter to name_scope. Yields: A tuple , both of which are unique and should be used for the tag and the scope for the summary to output.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_op_util.py", + "ast_data": "FunctionDef name:summary_scope arg:name arg:family arg:default_name arg:values arguments arg arg arg arg Assign Call Assign Call Assign Compare Call With Call If Compare Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_ListCodec", + "source_code": "class _ListCodec:\n\n def can_encode(self, pyobj):\n return isinstance(pyobj, list)\n\n def do_encode(self, list_value, encode_fn):\n encoded_list = struct_pb2.StructuredValue()\n encoded_list.list_value.CopyFrom(struct_pb2.ListValue())\n for element in list_value:\n encoded_list.list_value.values.add().CopyFrom(encode_fn(element))\n return encoded_list\n\n def can_decode(self, value):\n return value.HasField('list_value')\n\n def do_decode(self, value, decode_fn):\n return [decode_fn(element) for element in value.list_value.values]", + "docstring": "Codec for lists.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py", + "ast_data": "ClassDef name:_ListCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:list_value arg:encode_fn arguments arg arg arg Assign Call Call Call For Call Call Call Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "CreateShapeFromNumpy", + "source_code": "def CreateShapeFromNumpy(value):\n if isinstance(value, tuple):\n return Shape(xla_data_pb2.TUPLE, [CreateShapeFromNumpy(component) for component in value])\n else:\n return 
_CreateShapeFromNumpy(value)", + "docstring": "Create a Shape from a Numpy array or a nested tuple structure thereof. Args: value: Numpy array or (possibly nested) tuple structure that bottoms out in Numpy arrays. Returns: A Shape object.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_shape.py", + "ast_data": "FunctionDef name:CreateShapeFromNumpy arg:value arguments arg If Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "defer_succeed", + "source_code": "def defer_succeed(result: _T) -> Deferred[_T]:\n from twisted.internet import reactor\n d: Deferred[_T] = Deferred()\n reactor.callLater(_DEFER_DELAY, d.callback, result)\n return d", + "docstring": "Same as twisted.internet.defer.succeed but delay calling callback until next reactor loop It delays by 100ms so reactor has a chance to go through readers and writers before attending pending delayed calls, so do not set delay to zero.", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\defer.py", + "ast_data": "FunctionDef name:defer_succeed arg:result arguments arg Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_fontstretch", + "source_code": "def set_fontstretch(self, stretch):\n self._fontproperties.set_stretch(stretch)\n self.stale = True", + "docstring": "Set the font stretch (horizontal condensation or expansion). Parameters ---------- stretch : {a numeric value in range 0-1000, 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'} See Also -------- .font_manager.FontProperties.set_stretch", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:set_fontstretch arg:self arg:stretch arguments arg arg Call Assign" + }, + { + "library": "pandas", + "name": "_apply_axis_properties", + "source_code": "@final\n@staticmethod\ndef _apply_axis_properties(axis: Axis, rot=None, fontsize: int | None=None) -> None:\n if rot is not None or fontsize is not None:\n labels = axis.get_majorticklabels() + axis.get_minorticklabels()\n for label in labels:\n if rot is not None:\n label.set_rotation(rot)\n if fontsize is not None:\n label.set_fontsize(fontsize)", + "docstring": "Tick creation within matplotlib is reasonably expensive and is internally deferred until accessed as Ticks are created/destroyed multiple times per draw. It's therefore beneficial for us to avoid accessing unless we will act on the Tick.", + "type": "method", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py", + "ast_data": "FunctionDef name:_apply_axis_properties arg:axis arg:rot arg:fontsize arguments arg arg arg If BoolOp Compare Compare Assign Call Call For If Compare Call If Compare Call" + }, + { + "library": "pandas", + "name": "_box_as_indexlike", + "source_code": "def _box_as_indexlike(dt_array: ArrayLike, utc: bool=False, name: Hashable | None=None) -> Index:\n if lib.is_np_dtype(dt_array.dtype, 'M'):\n tz = 'utc' if utc else None\n return DatetimeIndex(dt_array, tz=tz, name=name)\n return Index(dt_array, name=name, dtype=dt_array.dtype)", + "docstring": "Properly boxes the ndarray of datetimes to DatetimeIndex if it is possible or to generic Index instead Parameters ---------- dt_array: 1-d array Array of datetimes to be wrapped in an Index. utc : bool Whether to convert/localize timestamps to UTC. 
name : string, default None Name for a resulting index Returns ------- result : datetime of converted dates - DatetimeIndex if convertible to sole datetime64 type - general Index otherwise", + "type": "function", + "file_path": "pandas\\pandas\\core\\tools\\datetimes.py", + "ast_data": "FunctionDef name:_box_as_indexlike arg:dt_array arg:utc arg:name arguments arg arg arg If Call Assign Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_filename_and_line_from_caller", + "source_code": "def set_filename_and_line_from_caller(self, offset: int=0) -> int:\n retcode = self.SUCCESS\n frame = inspect.currentframe()\n if not frame:\n return self.FAILURE\n frame = cast(types.FrameType, frame)\n for _ in range(offset + 1):\n parent = frame.f_back\n if parent is None:\n retcode = self.HEURISTIC_USED\n break\n parent = cast(types.FrameType, parent)\n frame = parent\n self.filename = frame.f_code.co_filename\n self.lineno = cast(int, frame.f_lineno)\n return retcode", + "docstring": "Set filename and line using the caller's stack frame. If the requested stack information is not available, a heuristic may be applied and self.HEURISTIC USED will be returned. If the heuristic fails then no change will be made to the filename and lineno members (None by default) and self.FAILURE will be returned. Args: offset: Integer. If 0, the caller's stack frame is used. If 1, the caller's caller's stack frame is used. Larger values are permissible but if out-of-range (larger than the number of stack frames available) the outermost stack frame will be used. Returns: TraceableObject.SUCCESS if appropriate stack information was found, TraceableObject.HEURISTIC_USED if the offset was larger than the stack, and TraceableObject.FAILURE if the stack was empty.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py", + "ast_data": "FunctionDef name:set_filename_and_line_from_caller arg:self arg:offset arguments arg arg Assign Assign Call If Return return:yes Assign Call For Call Assign If Compare Assign Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_FCLinearWrapper", + "source_code": "class _FCLinearWrapper(base.Layer):\n\n def __init__(self, feature_column, units=1, sparse_combiner='sum', weight_collections=None, trainable=True, name=None, **kwargs):\n super(_FCLinearWrapper, self).__init__(trainable=trainable, name=name, **kwargs)\n self._feature_column = feature_column\n self._units = units\n self._sparse_combiner = sparse_combiner\n self._weight_collections = weight_collections\n\n def build(self, _):\n if isinstance(self._feature_column, _CategoricalColumn):\n weight = self.add_variable(name='weights', shape=(self._feature_column._num_buckets, self._units), initializer=init_ops.zeros_initializer(), trainable=self.trainable)\n else:\n num_elements = self._feature_column._variable_shape.num_elements()\n weight = self.add_variable(name='weights', shape=[num_elements, self._units], initializer=init_ops.zeros_initializer(), trainable=self.trainable)\n _add_to_collections(weight, self._weight_collections)\n self._weight_var = weight\n self.built = True\n\n def call(self, builder):\n weighted_sum = _create_weighted_sum(column=self._feature_column, builder=builder, units=self._units, sparse_combiner=self._sparse_combiner, weight_collections=self._weight_collections, trainable=self.trainable, weight_var=self._weight_var)\n return weighted_sum", + "docstring": "Wraps a _FeatureColumn 
in a layer for use in a linear model. See above.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "ClassDef name:_FCLinearWrapper FunctionDef name:__init__ arg:self arg:feature_column arg:units arg:sparse_combiner arg:weight_collections arg:trainable arg:name arguments arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:build arg:self arg:_ arguments arg arg If Call Assign Call Call Assign Call Assign Call Call Call Assign Assign FunctionDef name:call arg:self arg:builder arguments arg arg Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "legval3d", + "source_code": "def legval3d(x, y, z, c):\n return pu._valnd(legval, c, x, y, z)", + "docstring": "Evaluate a 3-D Legendre series at points (x, y, z). This function returns the values: .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) The parameters , , and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either , , and or their elements must support multiplication and addition both with themselves and with the elements of . If has fewer than 3 dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape. Parameters ---------- x, y, z : array_like, compatible object The three dimensional series is evaluated at the points `xyzxyzcxyz`. See Also -------- legval, legval2d, leggrid2d, leggrid3d", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:legval3d arg:x arg:y arg:z arg:c arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "check", + "source_code": "def check(self, value):\n raise NotImplementedError", + "docstring": "Returns a byte tensor of `` indicating whether each event in value satisfies this constraint.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "FunctionDef name:check arg:self arg:value arguments arg arg Raise" + }, + { + "library": "pytorch", + "name": "set_local_fwd_input", + "source_code": "def set_local_fwd_input(self, prev_stage_outputs: Any, mb_index: int) -> None:\n recv_infos: tuple[InputInfo, ...] = self.args_recv_info[mb_index]\n prev_stage_outputs = _normalize_model_output_as_tuple(prev_stage_outputs)\n for info, tensor in zip(recv_infos, prev_stage_outputs):\n assert isinstance(tensor, torch.Tensor), f'expected tensor values as outputs from prev stage, got {type(tensor)}'\n assert isinstance(info, _RecvInfo), 'set_local_Fwd_input should only be called on non-first stage, which should always have RecvInfo'\n info.buffer = tensor.detach().requires_grad_(True)", + "docstring": "Moves 'prev_stage_outputs' from another stage on the same rank into place as inputs for this stage. Avoids copying tensor data or using send/recv op. 
Detaches original tensor and sets requires_grad so the tensor can serve as a leaf for autograd and gradients can be collected from it during backward.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", + "ast_data": "FunctionDef name:set_local_fwd_input arg:self arg:prev_stage_outputs arg:mb_index arguments arg arg arg Assign Call For Call Call Call Call Assign Call Call" + }, + { + "library": "scikit-learn", + "name": "ParamsDict", + "source_code": "class ParamsDict(ReprHTMLMixin, UserDict):\n _html_repr = _params_html_repr\n\n def __init__(self, params=None, non_default=tuple()):\n super().__init__(params or {})\n self.non_default = non_default", + "docstring": "Dictionary-like class to store and provide an HTML representation. It builds an HTML structure to be used with Jupyter notebooks or similar environments. It allows storing metadata to track non-default parameters. Parameters ---------- params : dict, default=None The original dictionary of parameters and their values. non_default : tuple The list of non-default parameters.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\params.py", + "ast_data": "ClassDef name:ParamsDict Assign FunctionDef name:__init__ arg:self arg:params arg:non_default arguments arg arg arg Call Call Call BoolOp Assign" + }, + { + "library": "tensorflow", + "name": "_grad_fn", + "source_code": "def _grad_fn(func_graph, grads):\n assert len(func_graph.outputs) == len(grads)\n ys = []\n grad_ys = []\n for y, grad_y in zip(func_graph.outputs, grads):\n if not backprop_util.IsTrainable(y):\n continue\n ys.append(y)\n grad_ys.append(grad_y)\n result = gradients_util._GradientsHelper(ys, func_graph.inputs, grad_ys=grad_ys, src_graph=func_graph)\n return result", + "docstring": "The gradient function for each conditional branch. This function builds the gradient graph of the corresponding forward-pass conditional branch in . This is done by differentiating func_graph's outputs w.r.t. its inputs. Args: func_graph: FuncGraph. The corresponding forward-pass function. grads: The list of input gradient Tensors. Returns: The output gradient Tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", + "ast_data": "FunctionDef name:_grad_fn arg:func_graph arg:grads arguments arg arg Compare Call Call Assign Assign For Call If Call Call Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "_update_inplace", + "source_code": "@final\ndef _update_inplace(self, result) -> None:\n self._mgr = result._mgr", + "docstring": "Replace self internals with result. Parameters ---------- result : same type as self", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:_update_inplace arg:self arg:result arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, replacements):\n self.replacements = replacements\n self.in_replacements = False\n self.preserved_annos = {anno.Basic.DIRECTIVES, anno.Basic.EXTRA_LOOP_TEST, anno.Basic.ORIGIN, anno.Basic.SKIP_PROCESSING, anno.Static.ORIG_DEFINITIONS, 'function_context_name'}", + "docstring": "Create a new ReplaceTransformer. 
Args: replacements: A mapping from placeholder names to (lists of) AST nodes that these placeholders will be replaced by.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\templates.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:replacements arguments arg arg Assign Assign Assign" + }, + { + "library": "pandas", + "name": "right", + "source_code": "@cache_readonly\ndef right(self) -> Index:\n return Index(self._data.right, copy=False)", + "docstring": "Return right bounds of the intervals in the IntervalIndex. The right bounds of each interval in the IntervalIndex are returned as an Index. The datatype of the right bounds is the same as the datatype of the endpoints of the intervals. Returns ------- Index An Index containing the right bounds of the intervals. See Also -------- IntervalIndex.left : Return the left bounds of the intervals in the IntervalIndex. IntervalIndex.mid : Return the mid-point of the intervals in the IntervalIndex. IntervalIndex.length : Return the length of the intervals in the IntervalIndex. Examples -------- >>> iv_idx = pd.IntervalIndex.from_arrays([1, 2, 3], [4, 5, 6], closed=\"right\") >>> iv_idx.right Index([4, 5, 6], dtype='int64') >>> iv_idx = pd.IntervalIndex.from_tuples( ... [(1, 4), (2, 5), (3, 6)], closed=\"left\" ... ) >>> iv_idx.right Index([4, 5, 6], dtype='int64')", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\interval.py", + "ast_data": "FunctionDef name:right arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "Kron", + "source_code": "class Kron(Benchmark):\n\n def setup(self):\n self.large_arr = np.random.random((10,) * 4)\n self.large_mat = np.asmatrix(np.random.random((100, 100)))\n self.scalar = 7\n\n def time_arr_kron(self):\n np.kron(self.large_arr, self.large_arr)\n\n def time_scalar_kron(self):\n np.kron(self.large_arr, self.scalar)\n\n def time_mat_kron(self):\n np.kron(self.large_mat, self.large_mat)", + "docstring": "Benchmarks for Kronecker product of two arrays", + "type": "class", + "file_path": "numpy\\benchmarks\\benchmarks\\bench_shape_base.py", + "ast_data": "ClassDef name:Kron FunctionDef name:setup arg:self arguments arg Assign Call Assign Call Call Assign FunctionDef name:time_arr_kron arg:self arguments arg Call FunctionDef name:time_scalar_kron arg:self arguments arg Call FunctionDef name:time_mat_kron arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "sync", + "source_code": "def sync(self):\n self._queue.join()\n logging.info('Sync on ongoing save/restore.')", + "docstring": "Sync on any ongoing save or restore events.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py", + "ast_data": "FunctionDef name:sync arg:self arguments arg Call Call" + }, + { + "library": "pytorch", + "name": "PassResult", + "source_code": "@compatibility(is_backward_compatible=False)\nclass PassResult(namedtuple('PassResult', ['graph_module', 'modified'])):\n __slots__ = ()\n\n def __new__(cls, graph_module, modified):\n return super().__new__(cls, graph_module, modified)", + "docstring": "Result of a pass: graph_module: The modified graph module modified: A flag for if the pass has modified the graph module", + "type": "class", + "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py", + "ast_data": "ClassDef name:PassResult Call Assign FunctionDef name:__new__ arg:cls arg:graph_module arg:modified arguments arg arg arg Return return:yes Call Call Call" + 
}, + { + "library": "pytorch", + "name": "from_onnx_type", + "source_code": "@classmethod\ndef from_onnx_type(cls, onnx_type: int | _C_onnx.TensorProtoDataType | None) -> JitScalarType:\n if onnx_type not in _ONNX_TO_SCALAR_TYPE:\n raise errors.OnnxExporterError(f'Unknown onnx_type: {onnx_type}')\n return _ONNX_TO_SCALAR_TYPE[typing.cast(_C_onnx.TensorProtoDataType, onnx_type)]", + "docstring": "Convert a ONNX data type to JitScalarType. Args: onnx_type: A torch._C._onnx.TensorProtoDataType to create a JitScalarType from Returns: JitScalarType Raises: OnnxExporterError: if dtype is not a valid torch.dtype or if it is None.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_type_utils.py", + "ast_data": "FunctionDef name:from_onnx_type arg:cls arg:onnx_type arguments arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_model_ready", + "source_code": "def _model_ready(self, sess: session.Session) -> Tuple[bool, Optional[str]]:\n return _ready(self._ready_op, sess, 'Model not ready')", + "docstring": "Checks if the model is ready or not. Args: sess: A . Returns: A tuple (is_ready, msg), where is_ready is True if ready and False otherwise, and msg is if the model is ready, a with the reason why it is not ready otherwise.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py", + "ast_data": "FunctionDef name:_model_ready arg:self arg:sess arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "rvs", + "source_code": "def rvs(self, size=1):\n size1d = tuple(np.atleast_1d(size))\n N = np.prod(size1d)\n x = np.zeros(N)\n simulated, i = (0, 1)\n while simulated < N:\n k = N - simulated\n u1 = self._umax * self._rng.uniform(size=k)\n v1 = self._rng.uniform(self._vmin, self._vmax, size=k)\n rvs = v1 / u1 + self._c\n accept = u1 ** 2 <= self._pdf(rvs)\n num_accept = np.sum(accept)\n if num_accept > 0:\n x[simulated:simulated + num_accept] = rvs[accept]\n simulated += num_accept\n if simulated == 0 and i * N >= 50000:\n msg = f'Not a single random variate could be generated in {i * N} attempts. The ratio of uniforms method does not appear to work for the provided parameters. Please check the pdf and the bounds.'\n raise RuntimeError(msg)\n i += 1\n return np.reshape(x, size1d)", + "docstring": "Sampling of random variates Parameters ---------- size : int or tuple of ints, optional Number of random variates to be generated (default is 1). 
Returns ------- rvs : ndarray The random variates distributed according to the probability distribution defined by the pdf.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_sampling.py", + "ast_data": "FunctionDef name:rvs arg:self arg:size arguments arg arg Assign Call Call Assign Call Assign Call Assign While Compare Assign Assign Call Assign Call Assign Assign Compare Call Assign Call If Compare Assign If BoolOp Compare Compare Assign Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "rc_params", + "source_code": "def rc_params(fail_on_error=False):\n return rc_params_from_file(matplotlib_fname(), fail_on_error)", + "docstring": "Construct a instance from the default Matplotlib rc file.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", + "ast_data": "FunctionDef name:rc_params arg:fail_on_error arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "merged", + "source_code": "@property\ndef merged(self):\n return self._topology(capi.geos_linemerge(self.ptr))", + "docstring": "Return the line merge of this Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:merged arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "add_subplot", + "source_code": "@_docstring.interpd\ndef add_subplot(self, *args, **kwargs):\n if 'figure' in kwargs:\n raise _api.kwarg_error('add_subplot', 'figure')\n if len(args) == 1 and isinstance(args[0], mpl.axes._base._AxesBase) and args[0].get_subplotspec():\n ax = args[0]\n key = ax._projection_init\n if ax.get_figure(root=False) is not self:\n raise ValueError('The Axes must have been created in the present figure')\n else:\n if not args:\n args = (1, 1, 1)\n if len(args) == 1 and isinstance(args[0], Integral) and (100 <= args[0] <= 999):\n args = tuple(map(int, str(args[0])))\n projection_class, pkw = self._process_projection_requirements(**kwargs)\n ax = projection_class(self, *args, **pkw)\n key = (projection_class, pkw)\n return self._add_axes_internal(ax, key)", + "docstring": "Add an to the figure as part of a subplot arrangement. Call signatures:: add_subplot(nrows, ncols, index, **kwargs) add_subplot(pos, **kwargs) add_subplot(ax) add_subplot() Parameters ---------- *args : int, (int, int, *index*), or , default: (1, 1, 1) The position of the subplot described by one of - Three integers (*nrows*, *ncols*, *index*). The subplot will take the *index* position on a grid with *nrows* rows and *ncols* columns. *index* starts at 1 in the upper left corner and increases to the right. *index* can also be a two-tuple specifying the (*first*, *last*) indices (1-based, and including *last*) of the subplot, e.g., `.SubplotSpec.add_subplot~.axes.Axes~matplotlib.projections~.axes.Axes.axes.Axesaxisartist_users-guide-index~matplotlib.axes.Axes~matplotlib.axis~.axes.Axes.projections.polar.PolarAxes~.axes.Axes` can be found in the following table but there might also be other keyword arguments if another projection is used. 
%(Axes:kwdoc)s See Also -------- .Figure.add_axes .pyplot.subplot .pyplot.axes .Figure.subplots .pyplot.subplots Examples -------- :: fig = plt.figure() fig.add_subplot(231) ax1 = fig.add_subplot(2, 3, 1) # equivalent but more general fig.add_subplot(232, frameon=False) # subplot with no frame fig.add_subplot(233, projection='polar') # polar subplot fig.add_subplot(234, sharex=ax1) # subplot sharing x-axis with ax1 fig.add_subplot(235, facecolor=\"red\") # red subplot ax1.remove() # delete ax1 from the figure fig.add_subplot(ax1) # add ax1 back to the figure", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:add_subplot arg:self arguments arg arg arg If Compare Raise Call If BoolOp Compare Call Call Call Assign Assign If Compare Call Raise Call If Assign If BoolOp Compare Call Call Compare Assign Call Call Call Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "_raise_degree", + "source_code": "@staticmethod\ndef _raise_degree(c, d):\n if d == 0:\n return c\n k = c.shape[0] - 1\n out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)\n for a in range(c.shape[0]):\n f = c[a] * comb(k, a)\n for j in range(d + 1):\n out[a + j] += f * comb(d, j) / comb(k + d, a + j)\n return out", + "docstring": "Raise a degree of a polynomial in the Bernstein basis. Given the coefficients of a polynomial degree , return (the coefficients of) the equivalent polynomial of degree . Parameters ---------- c : array_like coefficient array, 1-D d : integer Returns ------- array coefficient array, 1-D array of length Notes ----- This uses the fact that a Bernstein polynomial can be identically represented as a linear combination of polynomials of a higher degree : .. math:: b_{a, k} = comb(k, a) \\sum_{j=0}^{d} b_{a+j, k+d} \\ comb(d, j) / comb(k+d, a+j)", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", + "ast_data": "FunctionDef name:_raise_degree arg:c arg:d arguments arg arg If Compare Return return:yes Assign Assign Call For Call Assign Call For Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "add_initial_prefix", + "source_code": "def add_initial_prefix(self, field_name):\n return 'initial-%s' % self.add_prefix(field_name)", + "docstring": "Add an 'initial' prefix for checking dynamic initial values.", + "type": "method", + "file_path": "django\\django\\forms\\forms.py", + "ast_data": "FunctionDef name:add_initial_prefix arg:self arg:field_name arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "add_to_tensor", + "source_code": "def add_to_tensor(self, mat, name='add_to_tensor'):\n return self._possibly_broadcast_batch_shape(mat)", + "docstring": "Add matrix represented by this operator to . Equiv to . Args: mat: with same and shape broadcastable to . name: A name to give this . Returns: A with broadcast shape and same as .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_zeros.py", + "ast_data": "FunctionDef name:add_to_tensor arg:self arg:mat arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "iloc", + "source_code": "@property\ndef iloc(self) -> _iLocIndexer:\n return _iLocIndexer('iloc', self)", + "docstring": "Purely integer-location based indexing for selection by position. .. versionchanged:: 3.0 Callables which return a tuple are deprecated as input. `Selection by Position slicexslice` objects. 
>>> df.iloc[1:3, 0:3] a b c 1 100 200 300 2 1000 2000 3000 With a boolean array whose length matches the columns. >>> df.iloc[:, [True, False, True, False]] a c 0 1 3 1 100 300 2 1000 3000 With a callable function that expects the Series or DataFrame. >>> df.iloc[:, lambda df: [0, 2]] a c 0 1 3 1 100 300 2 1000 3000", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:iloc arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "Pad", + "source_code": "class Pad(Benchmark):\n param_names = ['shape', 'pad_width', 'mode']\n params = [[(2 ** 22,), (1024, 1024), (256, 128, 1), (4, 4, 4, 4), (1, 1, 1, 1, 1)], [1, 8, (0, 32)], ['constant', 'edge', 'linear_ramp', 'mean', 'reflect', 'wrap']]\n\n def setup(self, shape, pad_width, mode):\n self.array = np.full(shape, fill_value=1, dtype=np.float64)\n\n def time_pad(self, shape, pad_width, mode):\n np.pad(self.array, pad_width, mode)", + "docstring": "Benchmarks for . When benchmarking the pad function it is useful to cover scenarios where the ratio between the size of the input array and the output array differs significantly (original area vs. padded area). This allows to evaluate for which scenario a padding algorithm is optimized. Furthermore involving large range of array sizes ensures that the effects of CPU-bound caching is visible. The table below shows the sizes of the arrays involved in this benchmark: +-----------------+----------+-----------+-----------+-----------------+ | shape | original | padded: 1 | padded: 8 | padded: (0, 32) | +=================+==========+===========+===========+=================+ | (2 ** 22,) | 32 MiB | 32.0 MiB | 32.0 MiB | 32.0 MiB | +-----------------+----------+-----------+-----------+-----------------+ | (1024, 1024) | 8 MiB | 8.03 MiB | 8.25 MiB | 8.51 MiB | +-----------------+----------+-----------+-----------+-----------------+ | (256, 256, 1) | 256 KiB | 786 KiB | 5.08 MiB | 11.6 MiB | +-----------------+----------+-----------+-----------+-----------------+ | (4, 4, 4, 4) | 2 KiB | 10.1 KiB | 1.22 MiB | 12.8 MiB | +-----------------+----------+-----------+-----------+-----------------+ | (1, 1, 1, 1, 1) | 8 B | 1.90 MiB | 10.8 MiB | 299 MiB | +-----------------+----------+-----------+-----------+-----------------+", + "type": "class", + "file_path": "numpy\\benchmarks\\benchmarks\\bench_lib.py", + "ast_data": "ClassDef name:Pad Assign Assign FunctionDef name:setup arg:self arg:shape arg:pad_width arg:mode arguments arg arg arg arg Assign Call FunctionDef name:time_pad arg:self arg:shape arg:pad_width arg:mode arguments arg arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "_collect_input_masks", + "source_code": "def _collect_input_masks(self, inputs, args, kwargs):\n if self._call_arg_was_passed('mask', args, kwargs):\n return self._get_call_arg_value('mask', args, kwargs)\n if not self._should_compute_mask:\n return None\n input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None), inputs)\n if generic_utils.is_all_none(input_masks):\n return None\n return input_masks", + "docstring": "Checks if argument was passed, else gathers mask from inputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:_collect_input_masks arg:self arg:inputs arg:args arg:kwargs arguments arg arg arg arg If Call Return return:yes Call If Return return:no Assign Call arguments arg Call If Call Return return:no Return return:yes" 
+ }, + { + "library": "scikit-learn", + "name": "_callback", + "source_code": "def _callback(self, transformation):\n if self.callback is not None:\n self.callback(transformation, self.n_iter_)\n self.n_iter_ += 1", + "docstring": "Called after each iteration of the optimizer. Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The solution computed by the optimizer in this iteration.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_nca.py", + "ast_data": "FunctionDef name:_callback arg:self arg:transformation arguments arg arg If Compare Call" + }, + { + "library": "scikit-learn", + "name": "normalized_mutual_info_score", + "source_code": "@validate_params({'labels_true': ['array-like'], 'labels_pred': ['array-like'], 'average_method': [StrOptions({'arithmetic', 'max', 'min', 'geometric'})]}, prefer_skip_nested_validation=True)\ndef normalized_mutual_info_score(labels_true, labels_pred, *, average_method='arithmetic'):\n labels_true, labels_pred = check_clusterings(labels_true, labels_pred)\n classes = np.unique(labels_true)\n clusters = np.unique(labels_pred)\n if classes.shape[0] == clusters.shape[0] == 1 or classes.shape[0] == clusters.shape[0] == 0:\n return 1.0\n contingency = contingency_matrix(labels_true, labels_pred, sparse=True)\n contingency = contingency.astype(np.float64, copy=False)\n mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)\n if mi == 0:\n return 0.0\n h_true, h_pred = (entropy(labels_true), entropy(labels_pred))\n normalizer = _generalized_average(h_true, h_pred, average_method)\n return float(mi / normalizer)", + "docstring": "Normalized Mutual Information between two clusterings. Normalized Mutual Information (NMI) is a normalization of the Mutual Information (MI) score to scale the results between 0 (no mutual information) and 1 (perfect correlation). In this function, mutual information is normalized by some generalized mean of `average_methodadjusted_mutual_info_scoreUser Guide ` changed from 'geometric' to 'arithmetic'. Returns ------- nmi : float Score between 0.0 and 1.0 in normalized nats (based on the natural logarithm). 1.0 stands for perfectly complete labeling. See Also -------- v_measure_score : V-Measure (NMI with arithmetic mean option). adjusted_rand_score : Adjusted Rand Index. adjusted_mutual_info_score : Adjusted Mutual Information (adjusted against chance). 
Examples -------- Perfect labelings are both homogeneous and complete, hence have score 1.0:: >>> from sklearn.metrics.cluster import normalized_mutual_info_score >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) 1.0 >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 If classes members are completely split across different clusters, the assignment is totally in-complete, hence the NMI is null:: >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3]) 0.0", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_supervised.py", + "ast_data": "FunctionDef name:normalized_mutual_info_score arg:labels_true arg:labels_pred arguments arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Compare Return return:yes Assign Call Assign Call Assign Call If Compare Return return:yes Assign Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "min", + "source_code": "@property\ndef min(self):\n if self.is_quantized or self.base_dtype in (bool, string, complex64, complex128):\n raise TypeError(f'Cannot find minimum value of {self} with {('quantized type' if self.is_quantized else 'type')} {self.base_dtype}.')\n try:\n return ml_dtypes.finfo(self.as_numpy_dtype).min\n except:\n try:\n return ml_dtypes.iinfo(self.as_numpy_dtype).min\n except:\n raise TypeError(f'Cannot find minimum value of {self}.')", + "docstring": "Returns the minimum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", + "ast_data": "FunctionDef name:min arg:self arguments arg If BoolOp Compare Raise Call Try Return return:yes Call ExceptHandler Try Return return:yes Call ExceptHandler Raise Call" + }, + { + "library": "sphinx", + "name": "merge_info_from", + "source_code": "def merge_info_from(self, docnames: Iterable[str], other: BuildEnvironment, app: Sphinx) -> None:\n docnames = frozenset(docnames)\n for docname in docnames:\n self.all_docs[docname] = other.all_docs[docname]\n self.included[docname] = other.included[docname]\n if docname in other.reread_always:\n self.reread_always.add(docname)\n self.domains._merge_domain_data(docnames, other.domaindata)\n self.events.emit('env-merge-info', self, docnames, other)", + "docstring": "Merge global information gathered about *docnames* while reading them from the *other* environment. 
This possibly comes from a parallel build process.", + "type": "method", + "file_path": "sphinx\\sphinx\\environment\\__init__.py", + "ast_data": "FunctionDef name:merge_info_from arg:self arg:docnames arg:other arg:app arguments arg arg arg arg Assign Call For Assign Assign If Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "_codegen_partition_wrapper", + "source_code": "def _codegen_partition_wrapper(self, partition: PartitionType, signature: GraphPartitionSignature) -> None:\n from .codegen.wrapper import SubgraphPythonWrapperCodegen\n parent_wrapper_code = V.graph.wrapper_code\n graph_partition_id = next(self._graph_partition_counter)\n with V.graph.set_current_wrapper_code():\n V.graph.init_wrapper_code(is_subgraph=True, subgraph_name=f'partition_{graph_partition_id}', parent_wrapper_code=parent_wrapper_code, partition_signatures=signature)\n self._codegen(partition)\n assert isinstance(V.graph.wrapper_code, SubgraphPythonWrapperCodegen)\n signature = self.clean_removed_buffer_from_partition_signatures(signature)\n V.graph.wrapper_code.partition_signatures = signature\n V.graph.wrapper_code.write_prefix()\n partition_code, _ = V.graph.wrapper_code.generate(V.graph.is_inference)\n V.graph.wrapper_code.define_subgraph_launcher_fn(partition_code.value)\n V.graph.wrapper_code.codegen_partition_call(graph_partition_id, signature)\n V.graph.wrapper_code.allocated.update([node.get_name() for node in signature.output_nodes])", + "docstring": "Codegen a partition given its inputs/outputs", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:_codegen_partition_wrapper arg:self arg:partition arg:signature arguments arg arg arg Assign Assign Call With Call Call Call Call Assign Call Assign Call Assign Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_op_in_graph_mode", + "source_code": "def _op_in_graph_mode(tensor):\n if context.executing_eagerly():\n return tensor\n return tensor.op", + "docstring": "Returns the tensor's op in graph mode, or the tensor in eager mode. This is useful because sometimes an op is needed in graph mode instead of a tensor. In eager mode, there are no ops. Args: tensor: A tensor. Returns: The tensor's op in graph mode. 
The tensor in eager mode.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:_op_in_graph_mode arg:tensor arguments arg If Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "guard_or_false", + "source_code": "def guard_or_false(a: BoolLikeType) -> bool:\n return _guard_or(a, False)", + "docstring": "Try to guard a, if data dependent error encountered just return false.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:guard_or_false arg:a arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "enable_onednn_fusion", + "source_code": "def enable_onednn_fusion(enabled: bool):\n torch._C._jit_set_llga_enabled(enabled)", + "docstring": "Enable or disables onednn JIT fusion based on the parameter .", + "type": "function", + "file_path": "pytorch\\torch\\jit\\__init__.py", + "ast_data": "FunctionDef name:enable_onednn_fusion arg:enabled arguments arg Call" + }, + { + "library": "pandas", + "name": "unstack", + "source_code": "def unstack(self, unstacker, fill_value) -> BlockManager:\n new_columns = unstacker.get_new_columns(self.items)\n new_index = unstacker.new_index\n allow_fill = not unstacker.mask_all\n if allow_fill:\n new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)\n needs_masking = new_mask2D.any(axis=0)\n else:\n needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool)\n new_blocks: list[Block] = []\n columns_mask: list[np.ndarray] = []\n if len(self.items) == 0:\n factor = 1\n else:\n fac = len(new_columns) / len(self.items)\n assert fac == int(fac)\n factor = int(fac)\n for blk in self.blocks:\n mgr_locs = blk.mgr_locs\n new_placement = mgr_locs.tile_for_unstack(factor)\n blocks, mask = blk._unstack(unstacker, fill_value, new_placement=new_placement, needs_masking=needs_masking)\n new_blocks.extend(blocks)\n columns_mask.extend(mask)\n assert mask.sum() == sum((len(nb._mgr_locs) for nb in blocks))\n new_columns = new_columns[columns_mask]\n bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)\n return bm", + "docstring": "Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. 
Returns ------- unstacked : BlockManager", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:unstack arg:self arg:unstacker arg:fill_value arguments arg arg arg Assign Call Assign Assign If Assign Call Assign Call Assign Call If Compare Call Assign Assign Call Call Compare Call Assign Call For Assign Assign Call Assign Call Call Call Compare Call Call Call Assign Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "press_zoom", + "source_code": "def press_zoom(self, event):\n if event.button not in [MouseButton.LEFT, MouseButton.RIGHT] or event.x is None or event.y is None:\n return\n axes = self._start_event_axes_interaction(event, method='zoom')\n if not axes:\n return\n id_zoom = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)\n parent_ax = axes[0]\n if hasattr(parent_ax, '_colorbar'):\n cbar = parent_ax._colorbar.orientation\n else:\n cbar = None\n self._zoom_info = self._ZoomInfo(button=event.button, start_xy=(event.x, event.y), axes=axes, cid=id_zoom, cbar=cbar)", + "docstring": "Callback for mouse button press in zoom to rect mode.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:press_zoom arg:self arg:event arguments arg arg If BoolOp Compare Compare Compare Return return:no Assign Call If Return return:no Assign Call Assign If Call Assign Assign Assign Call" + }, + { + "library": "kornia", + "name": "pixel_format", + "source_code": "@property\ndef pixel_format(self) -> PixelFormat:\n return self._pixel_format", + "docstring": "Return the pixel format.", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:pixel_format arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_cpu_count_user", + "source_code": "def _cpu_count_user(os_cpu_count):\n cpu_count_affinity = _cpu_count_affinity(os_cpu_count)\n cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)\n cpu_count_loky = int(os.environ.get('LOKY_MAX_CPU_COUNT', os_cpu_count))\n return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)", + "docstring": "Number of user defined available CPUs", + "type": "function", + "file_path": "scipy\\.spin\\cmds.py", + "ast_data": "FunctionDef name:_cpu_count_user arg:os_cpu_count arguments arg Assign Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_dispatch", + "source_code": "def _dispatch(table, min, max=None, state=None, args=('raw',)):\n\n def decorate(method):\n get_args = [_arg_mapping[x] for x in args]\n\n @wraps(method)\n def wrapper(self, byte):\n if state is not None and self.state != state:\n raise ValueError('state precondition failed')\n return method(self, *[f(self, byte - min) for f in get_args])\n if max is None:\n table[min] = wrapper\n else:\n for i in range(min, max + 1):\n assert table[i] is None\n table[i] = wrapper\n return wrapper\n return decorate", + "docstring": "Decorator for dispatch by opcode. Sets the values in *table* from *min* to *max* to this method, adds a check that the Dvi state matches *state* if not None, reads arguments from the file according to *args*. Parameters ---------- table : dict[int, callable] The dispatch table to be filled in. min, max : int Range of opcodes that calls the registered function; *max* defaults to *min*. state : _dvistate, optional State of the Dvi object in which these opcodes are allowed. 
args : list[str], default: ['raw'] Sequence of argument specifications: - 'raw': opcode minus minimum - 'u1': read one unsigned byte - 'u4': read four bytes, treat as an unsigned number - 's4': read four bytes, treat as a signed number - 'slen': read (opcode - minimum) bytes, treat as signed - 'slen1': read (opcode - minimum + 1) bytes, treat as signed - 'ulen1': read (opcode - minimum + 1) bytes, treat as unsigned - 'olen1': read (opcode - minimum + 1) bytes, treat as unsigned if under four bytes, signed if four bytes", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\dviread.py", + "ast_data": "FunctionDef name:_dispatch arg:table arg:min arg:max arg:state arg:args arguments arg arg arg arg arg FunctionDef name:decorate arg:method arguments arg Assign FunctionDef name:wrapper arg:self arg:byte arguments arg arg If BoolOp Compare Compare Raise Call Return return:yes Call Call Call If Compare Assign For Call Compare Assign Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_infer_fft_length_for_irfft", + "source_code": "def _infer_fft_length_for_irfft(input_tensor, fft_rank):\n fft_shape = input_tensor.get_shape()[-fft_rank:]\n if not fft_shape.is_fully_defined():\n fft_length = _array_ops_stack.unstack(_array_ops.shape(input_tensor)[-fft_rank:])\n fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))\n return _array_ops_stack.stack(fft_length)\n fft_length = fft_shape.as_list()\n if fft_length:\n fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))\n return _ops.convert_to_tensor(fft_length, _dtypes.int32)", + "docstring": "Infers the argument for a IRFFT from .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py", + "ast_data": "FunctionDef name:_infer_fft_length_for_irfft arg:input_tensor arg:fft_rank arguments arg arg Assign Call If Call Assign Call Call Assign Call Return return:yes Call Assign Call If Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "LovaszSoftmaxLoss", + "source_code": "class LovaszSoftmaxLoss(nn.Module):\n\n def __init__(self, weight: Optional[Tensor]=None) -> None:\n super().__init__()\n self.weight = weight\n\n def forward(self, pred: Tensor, target: Tensor) -> Tensor:\n return lovasz_softmax_loss(pred=pred, target=target, weight=self.weight)", + "docstring": "Criterion that computes a surrogate multi-class intersection-over-union (IoU) loss. According to [1], we compute the IoU as follows: .. math:: \\text{IoU}(x, class) = \\frac{|X \\cap Y|}{|X \\cup Y|} [1] approximates this fomular with a surrogate, which is fully differentable. Where: - :math: expects to be the scores of each class. - :math: expects to be the binary tensor with the class labels. the loss, is finally computed as: .. math:: \\text{loss}(x, class) = 1 - \\text{IoU}(x, class) Reference: [1] .. note:: This loss function only supports multi-class (C > 1) labels. For binary labels please use the Lovasz-Hinge loss. Args: pred: logits tensor with shape :math: where C = number of classes > 1. labels: labels tensor with shape :math: where each value is :math:. weight: weights for classes with shape :math:. Return: a scalar with the computed loss. 
Example: >>> N = 5 # num_classes >>> criterion = LovaszSoftmaxLoss() >>> pred = torch.randn(1, N, 3, 5, requires_grad=True) >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N) >>> output = criterion(pred, target) >>> output.backward()", + "type": "class", + "file_path": "kornia\\kornia\\losses\\lovasz_softmax.py", + "ast_data": "ClassDef name:LovaszSoftmaxLoss FunctionDef name:__init__ arg:self arg:weight arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:pred arg:target arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "device_count", + "source_code": "def device_count() -> int:\n acc = current_accelerator()\n if acc is None:\n return 0\n mod = torch.get_device_module(acc)\n return mod.device_count()", + "docstring": "Return the number of current :ref: available. Returns: int: the number of the current :ref: available. If there is no available accelerators, return 0. .. note:: This API delegates to the device-specific version of . On CUDA, this API will NOT posion fork if NVML discovery succeeds. Otherwise, it will. For more details, see :ref:.", + "type": "function", + "file_path": "pytorch\\torch\\accelerator\\__init__.py", + "ast_data": "FunctionDef name:device_count arguments Assign Call If Compare Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "experimental_should_init", + "source_code": "@property\ndef experimental_should_init(self):\n return self._strategy.extended.experimental_should_init", + "docstring": "Whether to run init ops.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py", + "ast_data": "FunctionDef name:experimental_should_init arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "thumbprint", + "source_code": "def thumbprint(self):\n fields = list(self.REQUIRED_JSON_FIELDS)\n fields.append('kty')\n fields.sort()\n data = OrderedDict()\n for k in fields:\n data[k] = self.tokens[k]\n json_data = json_dumps(data)\n digest_data = hashlib.sha256(to_bytes(json_data)).digest()\n return to_unicode(urlsafe_b64encode(digest_data))", + "docstring": "Implementation of RFC7638 JSON Web Key (JWK) Thumbprint.", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7517\\base_key.py", + "ast_data": "FunctionDef name:thumbprint arg:self arguments arg Assign Call Call Call Assign Call For Assign Assign Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_resource_creator_scope", + "source_code": "@tf_contextlib.contextmanager\ndef _resource_creator_scope(self, resource_type, creator) -> Iterator[None]:\n old = self._resource_creator_stack\n new = copy.deepcopy(old)\n if isinstance(resource_type, (list, tuple)):\n for r in resource_type:\n new[r].append(creator)\n else:\n new[resource_type].append(creator)\n self._thread_local._resource_creator_stack = new\n try:\n yield\n finally:\n if self._thread_local._resource_creator_stack is not new:\n raise RuntimeError('Exiting resource_creator_scope without proper nesting.')\n self._thread_local._resource_creator_stack = old", + "docstring": "Scope which defines a resource creation function used by some resource. The resource should be a subclass of CapturableResource with a class method , the output of which is what the argument should be. By default, returns the class name, . 
Given a scope, creators being added with the same argument will be composed together to apply to all classes with this . is expected to be a function with the following signature: The creator is supposed to eventually call the next_creator to create an instance if it does want to create an instance and not call the class initialization method directly. This helps make creators composable. A creator may choose to create multiple instances, return already existing instances, or simply register that an instance was created and defer to the next creator in line. Creators can also modify keyword arguments seen by the next creators. Valid keyword arguments in depends on the specific resource class. For StaticHashTable, this may be: * initializer: The table initializer to use. * default_value: The value to use if a key is missing in the table. * name: Optional name for the table, default to None. Args: resource_type: the output of the resource class's method. creator: the passed creator for the resource. Yields: A scope in which the creator is active Raises: RuntimeError: If resource_creator_scope is existed without proper nesting.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_resource_creator_scope arg:self arg:resource_type arg:creator arguments arg arg arg Assign Assign Call If Call For Call Call Assign Try If Compare Raise Call Assign" + }, + { + "library": "tensorflow", + "name": "back_prop", + "source_code": "@property\ndef back_prop(self):\n return self._back_prop", + "docstring": "True iff backprop is enabled for this while loop.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:back_prop arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "draw", + "source_code": "def draw(self, n: int=1, out: Optional[torch.Tensor]=None, dtype: Optional[torch.dtype]=None) -> torch.Tensor:\n if dtype is None:\n dtype = torch.get_default_dtype()\n if self.num_generated == 0:\n if n == 1:\n result = self._first_point.to(dtype)\n else:\n result, self.quasi = torch._sobol_engine_draw(self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated, dtype=dtype)\n result = torch.cat((self._first_point.to(dtype), result), dim=-2)\n else:\n result, self.quasi = torch._sobol_engine_draw(self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1, dtype=dtype)\n self.num_generated += n\n if out is not None:\n out.resize_as_(result).copy_(result)\n return out\n return result", + "docstring": "Function to draw a sequence of :attr: points from a Sobol sequence. Note that the samples are dependent on the previous samples. The size of the result is :math:. Args: n (Int, optional): The length of sequence of points to draw. Default: 1 out (Tensor, optional): The output tensor dtype (:class:, optional): the desired data type of the returned tensor. 
Default: ``", + "type": "method", + "file_path": "pytorch\\torch\\quasirandom.py", + "ast_data": "FunctionDef name:draw arg:self arg:n arg:out arg:dtype arguments arg arg arg arg If Compare Assign Call If Compare If Compare Assign Call Assign Call Assign Call Call Assign Call If Compare Call Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_is_valid_grouped_gemm_fusion", + "source_code": "def _is_valid_grouped_gemm_fusion(computation_nodes):\n computation_op = mkldnn._linear_pointwise.default\n act = computation_nodes[0].args[0]\n wgt = computation_nodes[0].args[1]\n wgt_size = wgt.meta.get('val').size()\n return len(computation_nodes) >= 2 and all((node.target == computation_op and node.args[0] == act and (node.args[1].meta.get('val').size() == wgt_size) and (node.args[1] != wgt or gemm_idx == 0) for gemm_idx, node in enumerate(computation_nodes)))", + "docstring": "Here we check: 1. More than 1 GEMM nodes has been found. 2. All the GEMM nodes share the same activation. 3. All the GEMM nodes have same weight size but different wgt node.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\mkldnn_fusion.py", + "ast_data": "FunctionDef name:_is_valid_grouped_gemm_fusion arg:computation_nodes arguments arg Assign Assign Assign Assign Call Call Return return:yes BoolOp Compare Call Call BoolOp Compare Compare Compare Call Call BoolOp Compare Compare Call" + }, + { + "library": "django", + "name": "end_index", + "source_code": "def end_index(self):\n if self.number == self.paginator.num_pages:\n return self.paginator.count\n return self.number * self.paginator.per_page", + "docstring": "Return the 1-based index of the last object on this page, relative to total objects found (hits).", + "type": "method", + "file_path": "django\\django\\core\\paginator.py", + "ast_data": "FunctionDef name:end_index arg:self arguments arg If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_global_step_read", + "source_code": "def _get_global_step_read(graph=None):\n graph = graph or ops.get_default_graph()\n global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)\n if len(global_step_read_tensors) > 1:\n raise RuntimeError('There are multiple items in collection {}. There should be only one.'.format(GLOBAL_STEP_READ_KEY))\n if len(global_step_read_tensors) == 1:\n return global_step_read_tensors[0]\n return None", + "docstring": "Gets global step read tensor in graph. Args: graph: The graph in which to create the global step read tensor. If missing, use default graph. Returns: Global step read tensor. 
Raises: RuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\training_util.py", + "ast_data": "FunctionDef name:_get_global_step_read arg:graph arguments arg Assign BoolOp Call Assign Call If Compare Call Raise Call Call If Compare Call Return return:yes Return return:no" + }, + { + "library": "pandas", + "name": "nunique", + "source_code": "def nunique(self, dropna: bool=True) -> Series | DataFrame:\n ids = self._grouper.ids\n ngroups = self._grouper.ngroups\n val = self.obj._values\n codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False)\n if self._grouper.has_dropped_na:\n mask = ids >= 0\n ids = ids[mask]\n codes = codes[mask]\n group_index = get_group_index(labels=[ids, codes], shape=(ngroups, len(uniques)), sort=False, xnull=dropna)\n if dropna:\n mask = group_index >= 0\n if (~mask).any():\n ids = ids[mask]\n group_index = group_index[mask]\n mask = duplicated(group_index, 'first')\n res = np.bincount(ids[~mask], minlength=ngroups)\n res = ensure_int64(res)\n ri = self._grouper.result_index\n result: Series | DataFrame = self.obj._constructor(res, index=ri, name=self.obj.name)\n if not self.as_index:\n result = self._insert_inaxis_grouper(result)\n result.index = default_index(len(result))\n return result", + "docstring": "Return number of unique elements in the group. Parameters ---------- dropna : bool, default True Don't include NaN in the counts. Returns ------- Series Number of unique values within each group. See Also -------- core.resample.Resampler.nunique : Method nunique for Resampler. Examples -------- >>> lst = [\"a\", \"a\", \"b\", \"b\"] >>> ser = pd.Series([1, 2, 3, 3], index=lst) >>> ser a 1 a 2 b 3 b 3 dtype: int64 >>> ser.groupby(level=0).nunique() a 2 b 1 dtype: int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\generic.py", + "ast_data": "FunctionDef name:nunique arg:self arg:dropna arguments arg arg Assign Assign Assign Assign Call If Assign Compare Assign Assign Assign Call Call If Assign Compare If Call Assign Assign Assign Call Assign Call Assign Call Assign Call If Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "result", + "source_code": "def result(self):\n sum_over_row = math_ops.cast(math_ops.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)\n sum_over_col = math_ops.cast(math_ops.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)\n true_positives = math_ops.cast(array_ops.tensor_diag_part(self.total_cm), dtype=self._dtype)\n denominator = sum_over_row + sum_over_col - true_positives\n num_valid_entries = math_ops.reduce_sum(math_ops.cast(math_ops.not_equal(denominator, 0), dtype=self._dtype))\n iou = math_ops.div_no_nan(true_positives, denominator)\n return math_ops.div_no_nan(math_ops.reduce_sum(iou, name='mean_iou'), num_valid_entries)", + "docstring": "Compute the mean intersection-over-union via the confusion matrix.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:result arg:self arguments arg Assign Call Call Assign Call Call Assign Call Call Assign Assign Call Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, pred: Tensor, target: Tensor) -> Tensor:\n if not (pred.shape[2:] == target.shape[2:] and pred.size(0) == target.size(0) and (target.size(1) == 1)):\n raise ValueError(f'Prediction 
and target need to be of same size, and target should not be one-hot.Got {pred.shape} and {target.shape}.')\n if pred.size(1) < target.max().item():\n raise ValueError('Invalid target value.')\n out = stack([self.perform_erosion(pred[:, i:i + 1], where(target == i, tensor(1, device=target.device, dtype=target.dtype), tensor(0, device=target.device, dtype=target.dtype))) for i in range(pred.size(1))])\n if self.reduction == 'mean':\n out = out.mean()\n elif self.reduction == 'sum':\n out = out.sum()\n elif self.reduction == 'none':\n pass\n else:\n raise NotImplementedError(f'reduction `{self.reduction}` has not been implemented yet.')\n return out", + "docstring": "Compute Hausdorff loss. Args: pred: predicted tensor with a shape of :math: or :math:. Each channel is as binary as: 1 -> fg, 0 -> bg. target: target tensor with a shape of :math: or :math:. Returns: Estimated Hausdorff Loss.", + "type": "method", + "file_path": "kornia\\kornia\\losses\\hausdorff.py", + "ast_data": "FunctionDef name:forward arg:self arg:pred arg:target arguments arg arg arg If BoolOp Compare Compare Call Call Compare Call Raise Call If Compare Call Call Call Raise Call Assign Call Call Call Compare Call Call Call Call If Compare Assign Call If Compare Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_hatch_color", + "source_code": "def set_hatch_color(self, hatch_color):\n self._hatch_color = hatch_color", + "docstring": "Set the hatch color.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:set_hatch_color arg:self arg:hatch_color arguments arg arg Assign" + }, + { + "library": "matplotlib", + "name": "get_usetex", + "source_code": "def get_usetex(self):\n return self._usetex", + "docstring": "Return whether this object uses TeX for rendering.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:get_usetex arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_FresnelCosGrad", + "source_code": "@ops.RegisterGradient('FresnelCos')\ndef _FresnelCosGrad(op: ops.Operation, grad):\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n return grad * math_ops.cos(np.pi / 2.0 * math_ops.square(x))", + "docstring": "Compute gradient of fresnel_cos(x) with respect to its argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_FresnelCosGrad arg:op arg:grad arguments arg arg Assign With Call Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "add_include_dirs", + "source_code": "def add_include_dirs(self, *paths):\n include_dirs = self.paths(paths)\n dist = self.get_distribution()\n if dist is not None:\n if dist.include_dirs is None:\n dist.include_dirs = []\n dist.include_dirs.extend(include_dirs)\n else:\n self.include_dirs.extend(include_dirs)", + "docstring": "Add paths to configuration include directories. Add the given sequence of paths to the beginning of the include_dirs list. 
This list will be visible to all extension modules of the current package.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:add_include_dirs arg:self arguments arg arg Assign Call Assign Call If Compare If Compare Assign Call Call" + }, + { + "library": "kornia", + "name": "left_to_right_epipolar_distance", + "source_code": "def left_to_right_epipolar_distance(pts1: Tensor, pts2: Tensor, Fm: Tensor) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(pts1)\n KORNIA_CHECK_IS_TENSOR(pts2)\n KORNIA_CHECK_IS_TENSOR(Fm)\n if len(Fm.shape) < 3 or not Fm.shape[-2:] == (3, 3):\n raise ValueError(f'Fm must be a (*, 3, 3) tensor. Got {Fm.shape}')\n if pts1.shape[-1] == 2:\n pts1 = convert_points_to_homogeneous(pts1)\n F_t: Tensor = Fm.transpose(dim0=-2, dim1=-1)\n line1_in_2: Tensor = pts1 @ F_t\n return point_line_distance(pts2, line1_in_2)", + "docstring": "Return one-sided epipolar distance for correspondences given the fundamental matrix. This method measures the distance from points in the right images to the epilines of the corresponding points in the left images as they reflect in the right images. Args: pts1: correspondences from the left images with shape :math:. If they are not homogeneous, converted automatically. pts2: correspondences from the right images with shape :math:. If they are not homogeneous, converted automatically. Fm: Fundamental matrices with shape :math:. Called Fm to avoid ambiguity with torch.nn.functional. Returns: the computed Symmetrical distance with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\_metrics.py", + "ast_data": "FunctionDef name:left_to_right_epipolar_distance arg:pts1 arg:pts2 arg:Fm arguments arg arg arg Call Call Call If BoolOp Compare Call Compare Raise Call If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "read_char_array", + "source_code": "def read_char_array(self, hdr):\n arr = self.read_sub_array(hdr).astype(np.uint8)\n S = arr.tobytes().decode('latin-1')\n return np.ndarray(shape=hdr.dims, dtype=np.dtype('U1'), buffer=np.array(S)).copy()", + "docstring": "latin-1 text matrix (char matrix) reader Parameters ---------- hdr : `hdr`", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", + "ast_data": "FunctionDef name:read_char_array arg:self arg:hdr arguments arg arg Assign Call Call Assign Call Call Return return:yes Call Call Call Call" + }, + { + "library": "scipy", + "name": "sos2zpk", + "source_code": "def sos2zpk(sos):\n xp = array_namespace(sos)\n sos = xp.asarray(sos)\n n_sections = sos.shape[0]\n z = xp.zeros(n_sections * 2, dtype=xp.complex128)\n p = xp.zeros(n_sections * 2, dtype=xp.complex128)\n k = 1.0\n for section in range(n_sections):\n zpk = tf2zpk(sos[section, :3], sos[section, 3:])\n z = xpx.at(z, slice(2 * section, 2 * section + zpk[0].shape[0])).set(zpk[0])\n p = xpx.at(p, slice(2 * section, 2 * section + zpk[1].shape[0])).set(zpk[1])\n k *= zpk[2]\n return (z, p, k)", + "docstring": "Return zeros, poles, and gain of a series of second-order sections Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape `sosfilt` even if some of these are (effectively) zero. .. 
versionadded:: 0.16.0", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_filter_design.py", + "ast_data": "FunctionDef name:sos2zpk arg:sos arguments arg Assign Call Assign Call Assign Assign Call Assign Call Assign For Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "new_subgroups", + "source_code": "def new_subgroups(group_size=None, group=None, timeout=None, backend=None, pg_options=None, group_desc=None):\n if group_size is None:\n if not torch.cuda.is_available():\n raise ValueError(\"Default group size only takes effect when CUDA is available.If your subgroup using a backend that does not depend on CUDA,please pass in 'group_size' correctly.\")\n group_size = torch.cuda.device_count()\n if group_size <= 0:\n raise ValueError(f\"The arg 'group_size' ({group_size}) must be positive\")\n world_size = get_world_size(group=group)\n if world_size < group_size:\n raise ValueError(f\"The arg 'group_size' ({group_size}) must not exceed the world size ({world_size})\")\n if world_size % group_size != 0:\n raise ValueError(f\"The world size ({world_size}) must be divisible by 'group_size={group_size!r}'\")\n ranks = get_process_group_ranks(group=group or _get_default_group())\n ranks_per_subgroup_list = [ranks[i:i + group_size] for i in range(0, len(ranks), group_size)]\n return new_subgroups_by_enumeration(ranks_per_subgroup_list, timeout=timeout, backend=backend, pg_options=pg_options, group_desc=group_desc)", + "docstring": "Create subgroups of equal size. By default, it creates intra-machine subgroups, where each of which contains all the ranks of a machine, based on the assumption that each machine has the same number of devices. This is a convenience API that calls `Safe concurrent usagenew_groupinit_process_groupBackend` can be specified so that process group can pick up high priority cuda streams. group_desc (str, optional): A string describing the group. Each subgroup will inherit its group_desc Returns: The subgroup containing the current rank, and all the subgroups used for cleanup. Examples: >>> # Create intra-machine subgroups. >>> # xdoctest: +SKIP(\"need process group init\") >>> cur_subgroup, subgroups = dist.new_subgroups() >>> # Allreduce within the machine. >>> rank = dist.get_rank() >>> tensor = torch.ones(1, device=rank) * rank >>> dist.all_reduce(tensor, group=cur_subgroup) >>> tensor tensor([28]) # Assume 8 CUDA devices per machine. 28 is sum(range(8)). >>> # Cleanup. 
>>> for subgroup in subgroups: >>> dist.destroy_process_group(subgroup)", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:new_subgroups arg:group_size arg:group arg:timeout arg:backend arg:pg_options arg:group_desc arguments arg arg arg arg arg arg If Compare If Call Raise Call Assign Call If Compare Raise Call Assign Call If Compare Raise Call If Compare Raise Call Assign Call BoolOp Call Assign Call Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "isstaticmethod", + "source_code": "def isstaticmethod(obj: Any, cls: Any=None, name: str | None=None) -> TypeIs[staticmethod[Any, Any]]:\n if isinstance(obj, staticmethod):\n return True\n if cls and name:\n sentinel = object()\n for basecls in getattr(cls, '__mro__', [cls]):\n meth = basecls.__dict__.get(name, sentinel)\n if meth is not sentinel:\n return isinstance(meth, staticmethod)\n return False", + "docstring": "Check if the object is a :class:.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:isstaticmethod arg:obj arg:cls arg:name arguments arg arg arg If Call Return return:yes If BoolOp Assign Call For Call Assign Call If Compare Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "update_if_finite_grads", + "source_code": "def update_if_finite_grads():\n\n def incr_loss_scale():\n new_loss_scale = self._current_loss_scale * self._multiplier\n return control_flow_ops.group(_assign_if_finite(self._current_loss_scale, new_loss_scale), self._num_good_steps.assign(0))\n return cond.cond(self._num_good_steps + 1 >= self._increment_period, incr_loss_scale, lambda: _op_in_graph_mode(self._num_good_steps.assign_add(1)))", + "docstring": "Update assuming the gradients are finite.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py", + "ast_data": "FunctionDef name:update_if_finite_grads arguments FunctionDef name:incr_loss_scale arguments Assign Return return:yes Call Call Call Return return:yes Call Compare arguments Call Call" + }, + { + "library": "pytorch", + "name": "placements", + "source_code": "@property\ndef placements(self) -> tuple[Placement, ...]:\n return self._spec.placements", + "docstring": "The placements attribute of this DTensor that describes the layout of this DTensor on the its DeviceMesh. .. note:: `` is a read-only property, it can not be set.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py", + "ast_data": "FunctionDef name:placements arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "load", + "source_code": "def load(self, state_dict: dict[str, Any]) -> None:\n loader.load(state_dict, storage_reader=self.storage_reader, process_group=self.process_group, planner=self.load_planner)", + "docstring": "Calls :py:meth: . 
Utilizing values passed during initialization.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\_checkpointer.py", + "ast_data": "FunctionDef name:load arg:self arg:state_dict arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "AnonymousExtensionTypeSpec", + "source_code": "@type_spec_registry.register('tf.AnonymousExtensionType.Spec')\nclass AnonymousExtensionTypeSpec(ExtensionTypeSpec):\n\n def __init__(self, **fields):\n for name in fields:\n if extension_type_field.ExtensionTypeField.is_reserved_name(name) or (name.startswith('__') and name.endswith('__')):\n raise ValueError(f'Reserved field name {name} was encountered when trying to instantiate an AnonymousExtensionTypeSpec.')\n fields = [(k, _convert_anonymous_fields(v, for_spec=True)) for k, v in fields.items()]\n self.__dict__.update(fields)\n super().__init__()\n value_type = AnonymousExtensionType\n\n def _serialize(self):\n return tuple(((name, _change_nested_mappings_to(value, dict)) for name, value in self.__dict__.items() if not extension_type_field.ExtensionTypeField.is_reserved_name(name)))\n\n def __setattr__(self, name, value):\n if name in type_spec.CACHED_FIXED_PROPERTIES:\n super().__setattr__(name, value)\n else:\n raise AttributeError(f'Cannot set attribute `{name}`. AnonymousExtensionTypeSpec instances are immutable.')\n\n def __delattr__(self, name):\n raise AttributeError(f'Cannot delete attribute `{name}`. AnonymousExtensionTypeSpec instances are immutable.')", + "docstring": "TypeSpec for AnonymousExtensionType.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", + "ast_data": "ClassDef name:AnonymousExtensionTypeSpec FunctionDef name:__init__ arg:self arguments arg arg For If BoolOp Call BoolOp Call Call Raise Call Assign Call Call Call Call Call Assign FunctionDef name:_serialize arg:self arguments arg Return return:yes Call Call Call Call FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Compare Call Call Raise Call FunctionDef name:__delattr__ arg:self arg:name arguments arg arg Raise Call Call" + }, + { + "library": "kornia", + "name": "update_from_dict", + "source_code": "def update_from_dict(self, dic: Dict[str, float], batch_size: int) -> None:\n for k, v in dic.items():\n self.update(k, v, batch_size)", + "docstring": "Update the stats by the dict.", + "type": "method", + "file_path": "kornia\\kornia\\x\\utils.py", + "ast_data": "FunctionDef name:update_from_dict arg:self arg:dic arg:batch_size arguments arg arg arg For Call Call" + }, + { + "library": "scikit-learn", + "name": "_clear_state", + "source_code": "def _clear_state(self):\n for var in ('train_score_', 'validation_score_'):\n if hasattr(self, var):\n delattr(self, var)", + "docstring": "Clear the state of the gradient boosting model.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:_clear_state arg:self arguments arg For If Call Call" + }, + { + "library": "tensorflow", + "name": "_get_variable_nodes_from_graph_def", + "source_code": "def _get_variable_nodes_from_graph_def(graph_def):\n variables = [n for n in graph_def.node if n.op == 'VarHandleOp']\n variable_name_map = dict(((n.name, n) for n in variables))\n child_map = collections.defaultdict(lambda: [])\n for n in graph_def.node:\n for inp in n.input:\n if not inp.startswith('^'):\n child_map[inp].append(n)\n variables = {}\n for v_name, v_node in 
variable_name_map.items():\n queue = list(child_map[v_name])\n processed = set([])\n while queue:\n n_current = queue.pop()\n if n_current.name in processed:\n continue\n processed.add(n_current.name)\n if n_current.op in _PASS_THROUGH_VARIABLE_OPS:\n children = child_map.get(n_current.name, [])\n queue.extend(children)\n elif n_current.op not in _READ_ONLY_VARIABLE_OPS:\n variables[v_name] = (v_node, True)\n queue = []\n if v_name not in variables:\n variables[v_name] = (v_node, False)\n return variables", + "docstring": "Get the list of Variable nodes from . Args: graph_def: An instance of . This GraphDef *must* have already been optimized by Grappler. In particular, function inlining must have already happened. Returns: A dict mapping string names of variables to tuples , where is the corresponding to variable, and is a python bool describing whether the variable is modified during runtime.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_aot_compile.py", + "ast_data": "FunctionDef name:_get_variable_nodes_from_graph_def arg:graph_def arguments arg Assign Compare Assign Call Assign Call arguments For For If Call Call Assign For Call Assign Call Assign Call While Assign Call If Compare Call If Compare Assign Call Call If Compare Assign Assign If Compare Assign Return return:yes" + }, + { + "library": "numpy", + "name": "index", + "source_code": "@set_module('numpy.strings')\ndef index(a, sub, start=0, end=None):\n end = end if end is not None else MAX\n return _index_ufunc(a, sub, start, end)", + "docstring": "Like , but raises :exc: when the substring is not found. Parameters ---------- a : array-like, with `` dtype start, end : array_like, with any integer dtype, optional Returns ------- out : ndarray Output array of ints. See Also -------- find, str.index Examples -------- >>> import numpy as np >>> a = np.array([\"Computer Science\"]) >>> np.strings.index(a, \"Science\", start=0, end=None) array([9])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\strings.py", + "ast_data": "FunctionDef name:index arg:a arg:sub arg:start arg:end arguments arg arg arg arg Assign Compare Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_to_matrix", + "source_code": "def _to_matrix(u):\n u_rank = len(u.shape)\n if u_rank not in [1, 2]:\n raise ValueError('The input tensor should have rank 1 or 2. Given rank: {}'.format(u_rank))\n if u_rank == 1:\n return array_ops.expand_dims(u, 0)\n return u", + "docstring": "If input tensor is a vector (i.e., has rank 1), converts it to matrix.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\kernelized_utils.py", + "ast_data": "FunctionDef name:_to_matrix arg:u arguments arg Assign Call If Compare Raise Call Call If Compare Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, num_steps=None, last_step=None):\n if num_steps is None and last_step is None:\n raise ValueError('One of num_steps or last_step must be specified.')\n if num_steps is not None and last_step is not None:\n raise ValueError('Only one of num_steps or last_step can be specified.')\n self._num_steps = num_steps\n self._last_step = last_step", + "docstring": "Initializes a . This hook requests stop after either a number of steps have been executed or a last step has been reached. Only one of the two options can be specified. if is specified, it indicates the number of steps to execute after is called. 
If instead is specified, it indicates the last step we want to execute, as passed to the call. Args: num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:num_steps arg:last_step arguments arg arg arg If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call Assign Assign" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, node_pattern: NodePattern, modules: dict[str, torch.nn.Module], root_node_getter: Optional[Callable]=None, is_custom_module=False, is_standalone_module=False):\n self.node_pattern = node_pattern\n self.modules = modules\n if root_node_getter is None:\n root_node_getter = _default_root_node_getter\n self.root_node = root_node_getter(node_pattern)\n self.is_custom_module_ = is_custom_module\n self.is_standalone_module_ = is_standalone_module\n self.num_tensor_args = 0\n if isinstance(self.root_node, Node):\n cache_for_no_tensor_check: dict[Node, bool] = {}\n for arg_idx in range(len(self.root_node.args)):\n arg = self.root_node.args[arg_idx]\n if isinstance(arg, Node) and (not all_node_args_have_no_tensors(arg, self.modules, cache_for_no_tensor_check)):\n self.num_tensor_args += 1", + "docstring": "Records pattern information in __init__, which will be used in convert", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\quantize_handler.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:node_pattern arg:modules arg:root_node_getter arg:is_custom_module arg:is_standalone_module arguments arg arg arg arg arg arg Assign Assign If Compare Assign Assign Call Assign Assign Assign If Call For Call Call Assign If BoolOp Call Call" + }, + { + "library": "pytorch", + "name": "compute_cosine_similarity", + "source_code": "@maybe_dequantize_first_two_tensor_args_and_handle_tuples\ndef compute_cosine_similarity(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n x = x.reshape(1, -1)\n y = y.reshape(1, -1)\n return torch.nn.functional.cosine_similarity(x, y)", + "docstring": "Computes the cosine similarity between and . 
Args: x: Tensor or tuple of tensors y: Tensor or tuple of tensors Return: float or tuple of floats", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py", + "ast_data": "FunctionDef name:compute_cosine_similarity arg:x arg:y arguments arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "url", + "source_code": "def url(self, name):\n raise NotImplementedError('subclasses of Storage must provide a url() method')", + "docstring": "Return an absolute URL where the file's contents can be accessed directly by a web browser.", + "type": "method", + "file_path": "django\\django\\core\\files\\storage\\base.py", + "ast_data": "FunctionDef name:url arg:self arg:name arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "build_chunks", + "source_code": "def build_chunks(self) -> int:\n if self.size_check(self.proto_size):\n new_proto = type(self._proto)()\n new_proto.MergeFrom(self._proto)\n self._proto.Clear()\n self.add_chunk(new_proto, [])\n return self.proto_size\n return 0", + "docstring": "Creates a chunk for the entire proto and returns the original size.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py", + "ast_data": "FunctionDef name:build_chunks arg:self arguments arg If Call Assign Call Call Call Call Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_eval_if_composite", + "source_code": "def _eval_if_composite(self, tensor):\n from tensorflow.python.keras.utils import tf_utils\n if tf_utils.is_extension_type(tensor):\n return self._session.run(tensor)\n else:\n return tensor", + "docstring": "Helper method which evaluates any CompositeTensors passed to it.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_eval_if_composite arg:self arg:tensor arguments arg arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_ThetaShift", + "source_code": "class _ThetaShift(mtransforms.ScaledTranslation):\n\n def __init__(self, axes, pad, mode):\n super().__init__(pad, pad, axes.get_figure(root=False).dpi_scale_trans)\n self.set_children(axes._realViewLim)\n self.axes = axes\n self.mode = mode\n self.pad = pad\n __str__ = mtransforms._make_str_method('axes', 'pad', 'mode')\n\n def get_matrix(self):\n if self._invalid:\n if self.mode == 'rlabel':\n angle = np.deg2rad(self.axes.get_rlabel_position() * self.axes.get_theta_direction()) + self.axes.get_theta_offset() - np.pi / 2\n elif self.mode == 'min':\n angle = self.axes._realViewLim.xmin - np.pi / 2\n elif self.mode == 'max':\n angle = self.axes._realViewLim.xmax + np.pi / 2\n self._t = (self.pad * np.cos(angle) / 72, self.pad * np.sin(angle) / 72)\n return super().get_matrix()", + "docstring": "Apply a padding shift based on axes theta limits. This is used to create padding for radial ticks. Parameters ---------- axes : The owning Axes; used to determine limits. pad : float The padding to apply, in points. 
mode : {'min', 'max', 'rlabel'} Whether to shift away from the start (``).", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", + "ast_data": "ClassDef name:_ThetaShift FunctionDef name:__init__ arg:self arg:axes arg:pad arg:mode arguments arg arg arg arg Call Call Call Call Assign Assign Assign Assign Call FunctionDef name:get_matrix arg:self arguments arg If If Compare Assign Call Call Call Call If Compare Assign If Compare Assign Assign Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_pad", + "source_code": "def _pad(x):\n shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)\n z = array_ops.zeros(shape, dtype=x.dtype)\n return array_ops.concat([z, x, z], axis=-1)", + "docstring": "Prepends and appends a zero to every vector in a batch of vectors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py", + "ast_data": "FunctionDef name:_pad arg:x arguments arg Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_iteritems", + "source_code": "def _iteritems(d):\n return d.iteritems() if hasattr(d, 'iteritems') else d.items()", + "docstring": "Like d.iteritems, but accepts any collections.Mapping.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\_hash.py", + "ast_data": "FunctionDef name:_iteritems arg:d arguments arg Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "print_label", + "source_code": "def print_label(self, linecontour, labelwidth):\n return len(linecontour) > 10 * labelwidth or (len(linecontour) and (np.ptp(linecontour, axis=0) > 1.2 * labelwidth).any())", + "docstring": "Return whether a contour is long enough to hold a label.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\contour.py", + "ast_data": "FunctionDef name:print_label arg:self arg:linecontour arg:labelwidth arguments arg arg arg Return return:yes BoolOp Compare Call BoolOp Call Call Compare Call" + }, + { + "library": "scikit-learn", + "name": "_get_metadata_for_step", + "source_code": "def _get_metadata_for_step(self, *, step_idx, step_params, all_params):\n if self.transform_input is None or not all_params or (not step_params) or (step_idx == 0):\n return step_params\n sub_pipeline = self[:step_idx]\n sub_metadata_routing = get_routing_for_object(sub_pipeline)\n transform_params = {key: value for key, value in all_params.items() if key in sub_metadata_routing.consumes(method='transform', params=all_params.keys())}\n transformed_params = dict()\n transformed_cache = dict()\n for method, method_params in step_params.items():\n transformed_params[method] = Bunch()\n for param_name, param_value in method_params.items():\n if param_name in self.transform_input:\n transformed_params[method][param_name] = _cached_transform(sub_pipeline, cache=transformed_cache, param_name=param_name, param_value=param_value, transform_params=transform_params)\n else:\n transformed_params[method][param_name] = param_value\n return transformed_params", + "docstring": "Get params (metadata) for step . This transforms the metadata up to this step if required, which is indicated by the parameter. If a param in is included in the list, it will be transformed. Parameters ---------- step_idx : int Index of the step in the pipeline. step_params : dict Parameters specific to the step. These are routed parameters, e.g. . 
If a parameter name here is included in the , then it will be transformed. Note that these parameters are *after* routing, so the aliases are already resolved. all_params : dict All parameters passed by the user. Here this is used to call on the slice of the pipeline itself. Returns ------- dict Parameters to be passed to the step. The ones which should be transformed are transformed.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:_get_metadata_for_step arg:self arguments arg arg arg arg If BoolOp Compare Compare Return return:yes Assign Assign Call Assign Call Compare Call Call Assign Call Assign Call For Call Assign Call For Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_filter_top_k", + "source_code": "def _filter_top_k(x, k):\n _, top_k_idx = nn_ops.top_k(x, k, sorted=False)\n top_k_mask = math_ops.reduce_sum(array_ops.one_hot(top_k_idx, array_ops.shape(x)[-1], axis=-1), axis=-2)\n return x * top_k_mask + NEG_INF * (1 - top_k_mask)", + "docstring": "Filters top-k values in the last dim of x and set the rest to NEG_INF. Used for computing top-k prediction values in dense labels (which has the same shape as predictions) for recall and precision top-k metrics. Args: x: tensor with any dimensions. k: the number of values to keep. Returns: tensor with same shape and dtype as x.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py", + "ast_data": "FunctionDef name:_filter_top_k arg:x arg:k arguments arg arg Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "compose_policy", + "source_code": "def compose_policy(self, policy: List[SUBPOLICY_CONFIG]) -> List[PolicySequential]:\n return [self.compose_subpolicy_sequential(subpolicy) for subpolicy in policy]", + "docstring": "Compose policy by the provided policy config.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\auto\\base.py", + "ast_data": "FunctionDef name:compose_policy arg:self arg:policy arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "increment_version", + "source_code": "def increment_version(tensor: Union[torch.Tensor, Iterable[torch.Tensor]]) -> None:\n if isinstance(tensor, torch.Tensor):\n tensor = (tensor,)\n torch._C._increment_version(tensor)", + "docstring": "Update autograd metadata tracking whether the given Tensor was modified in place. This is to enable more accurate error checking within the autograd engine. It is already done automatically by PyTorch functions and within custom Function when mark_dirty() is called appropriately so you only need to call this explicitly if you are doing inplace operation on the Tensor data in a way that Pytorch doesn't know about. For example a custom kernel that reads the Tensor data_ptr and modifies the memory inplace based on this pointer. Can accept either a tensor, or a list of tensors. Note that incrementing the version counter multiple times for a single inplace operation is not problematic. 
Note that if you pass in tensor constructed under torch.inference_mode(), we will not bump its version counter (because your tensor does not have one).", + "type": "function", + "file_path": "pytorch\\torch\\autograd\\graph.py", + "ast_data": "FunctionDef name:increment_version arg:tensor arguments arg If Call Assign Call" + }, + { + "library": "pytorch", + "name": "boxed_run", + "source_code": "@compatibility(is_backward_compatible=True)\ndef boxed_run(self, args_list):\n args_iter = iter(args_list)\n env = {}\n for n in self.graph.nodes:\n if n.op == 'placeholder':\n env[n] = next(args_iter)\n args_list.clear()\n return self.run(initial_env=env)", + "docstring": "Run via interpretation and return the result. This uses the \"boxed\" calling convention, where you pass a list of arguments, which will be cleared by the interpreter. This ensures that input tensors are promptly deallocated.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\interpreter.py", + "ast_data": "FunctionDef name:boxed_run arg:self arg:args_list arguments arg arg Assign Call Assign For If Compare Assign Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "def step(self, closure: Optional[Callable[[], float]]=None) -> Optional[float]:\n raise NotImplementedError", + "docstring": "Perform a single optimization step to update parameter. Args: closure (Callable): A closure that reevaluates the model and returns the loss. Optional for most optimizers.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\optimizer.py", + "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Raise" + }, + { + "library": "tensorflow", + "name": "get_shapes", + "source_code": "def get_shapes(tensors):\n return nest.map_structure(lambda x: x.shape, tensors)", + "docstring": "Gets shapes from tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py", + "ast_data": "FunctionDef name:get_shapes arg:tensors arguments arg Return return:yes Call arguments arg" + }, + { + "library": "django", + "name": "response_delete", + "source_code": "def response_delete(self, request, obj_display, obj_id):\n if IS_POPUP_VAR in request.POST:\n popup_response_data = json.dumps({'action': 'delete', 'value': str(obj_id)})\n return TemplateResponse(request, self.popup_response_template or ['admin/%s/%s/popup_response.html' % (self.opts.app_label, self.opts.model_name), 'admin/%s/popup_response.html' % self.opts.app_label, 'admin/popup_response.html'], {'popup_response_data': popup_response_data})\n self.message_user(request, _('The %(name)s “%(obj)s” was deleted successfully.') % {'name': self.opts.verbose_name, 'obj': obj_display}, messages.SUCCESS)\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' % (self.opts.app_label, self.opts.model_name), current_app=self.admin_site.name)\n preserved_filters = self.get_preserved_filters(request)\n post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': self.opts}, post_url)\n else:\n post_url = reverse('admin:index', current_app=self.admin_site.name)\n return HttpResponseRedirect(post_url)", + "docstring": "Determine the HttpResponse for the delete_view stage.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:response_delete arg:self arg:request arg:obj_display arg:obj_id arguments arg arg arg arg If Compare Assign Call Call Return return:yes Call BoolOp 
Call Call If Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "glossary", + "source_code": "class glossary(nodes.Element):\n pass", + "docstring": "Node to insert a glossary.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:glossary" + }, + { + "library": "tensorflow", + "name": "_export_debug_info", + "source_code": "def _export_debug_info(exported_graph: ops.Graph, export_dir: str):\n debug_builder = tf_stack.GraphDebugInfoBuilder()\n for fn_name in exported_graph._functions:\n fn = exported_graph._get_function(fn_name)\n if not isinstance(fn, defun.AtomicFunction):\n continue\n debug_builder.AppendGraphDebugInfo(fn_name, fn.graph_debug_info)\n graph_debug_info = debug_builder.Build()\n file_io.atomic_write_string_to_file(file_io.join(path_helpers.get_or_create_debug_dir(export_dir), constants.DEBUG_INFO_FILENAME_PB), graph_debug_info.SerializeToString(deterministic=True))", + "docstring": "Exports debug information from graph to file. Creates and writes GraphDebugInfo with traces for ops in all functions of the exported_graph. Args: exported_graph: A Graph that has been created by tracing a saveable view. export_dir: SavedModel directory in which to write the debug info.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:_export_debug_info arg:exported_graph arg:export_dir arguments arg arg Assign Call For Assign Call If Call Call Assign Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_set_rank_if_unknown", + "source_code": "def _set_rank_if_unknown(self, new_rank: int) -> 'DynamicRaggedShape.Spec':\n if new_rank is None:\n raise TypeError('new_rank is None, but expected int')\n if new_rank < 0:\n raise ValueError('Rank must be non-negative')\n current_rank = self.rank\n if current_rank is not None and current_rank < new_rank:\n raise ValueError('Rank is {current_rank}, expected at least {new_rank}.'.format(current_rank=current_rank, new_rank=new_rank))\n if current_rank is not None:\n return self\n if self._row_partitions:\n new_inner_rank = max(new_rank - self.num_row_partitions, 1)\n first_dim = self._row_partitions[-1].nvals\n static_inner_shape = tensor_shape.TensorShape([first_dim] + [None] * (new_inner_rank - 1))\n else:\n static_inner_shape = tensor_shape.TensorShape([None] * new_rank)\n return DynamicRaggedShape.Spec(row_partitions=self._row_partitions, static_inner_shape=static_inner_shape, dtype=self.dtype)", + "docstring": "Ensures this has a known rank at least new_rank.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:_set_rank_if_unknown arg:self arg:new_rank arguments arg arg If Compare Raise Call If Compare Raise Call Assign If BoolOp Compare Compare Raise Call Call If Compare Return return:yes If Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_parse_dtensor_env_var_from_cluster_resolver", + "source_code": "def _parse_dtensor_env_var_from_cluster_resolver(cluster_resolver):\n result = {}\n cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_resolver.cluster_spec())\n dtensor_jobs = []\n if 'chief' in cluster_spec.jobs:\n dtensor_jobs.extend(cluster_spec.job_tasks('chief'))\n if 'worker' in cluster_spec.jobs:\n dtensor_jobs.extend(cluster_spec.job_tasks('worker'))\n if None in 
dtensor_jobs:\n raise ValueError(f'Unexpected dtensor job address from cluster spec: {cluster_spec}')\n result['DTENSOR_JOBS'] = ','.join(dtensor_jobs)\n result['DTENSOR_NUM_CLIENTS'] = str(len(dtensor_jobs))\n if cluster_resolver.task_type == 'chief':\n dtensor_client_id = 0\n elif cluster_resolver.task_type == 'worker':\n dtensor_client_id = cluster_resolver.task_id\n if 'chief' in cluster_spec.jobs:\n dtensor_client_id += 1\n result['DTENSOR_CLIENT_ID'] = str(dtensor_client_id)\n result['DTENSOR_JOB_NAME'] = 'worker'\n return result", + "docstring": "Parse the env vars for Dtensor based on the cluster resolver. In the multi-client setting, each of the DTensor jobs need to aware of each other, and the interface to setup those values are via the envvars. The value used by dtensor are different from the existing . This function will parse the value from cluster resolver, and populate the corresponding value for DTensor jobs in the . Args: cluster_resolver: A instance. Returns: A dict of {Str:Str} which contains all the env vars needed by DTensor jobs. The value is for verification purpose. Raises: The value parsed from existing cluster spec is not valid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\multi_worker_mirrored_strategy.py", + "ast_data": "FunctionDef name:_parse_dtensor_env_var_from_cluster_resolver arg:cluster_resolver arguments arg Assign Assign Call Call Assign If Compare Call Call If Compare Call Call If Compare Raise Call Assign Call Assign Call Call If Compare Assign If Compare Assign If Compare Assign Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "replace_regex", + "source_code": "def replace_regex(values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None) -> None:\n if isna(value) or not isinstance(value, str):\n\n def re_replacer(s):\n if is_re(rx) and isinstance(s, str):\n return value if rx.search(s) is not None else s\n else:\n return s\n else:\n\n def re_replacer(s):\n if is_re(rx) and isinstance(s, str):\n return rx.sub(value, s)\n else:\n return s\n f = np.vectorize(re_replacer, otypes=[np.object_])\n if mask is None:\n values[:] = f(values)\n else:\n if values.ndim != mask.ndim:\n mask = np.broadcast_to(mask, values.shape)\n values[mask] = f(values[mask])", + "docstring": "Parameters ---------- values : ArrayLike Object dtype. rx : re.Pattern value : Any mask : np.ndarray[bool], optional Notes ----- Alters values in-place.", + "type": "function", + "file_path": "pandas\\pandas\\core\\array_algos\\replace.py", + "ast_data": "FunctionDef name:replace_regex arg:values arg:rx arg:value arg:mask arguments arg arg arg arg If BoolOp Call Call FunctionDef name:re_replacer arg:s arguments arg If BoolOp Call Call Return return:yes Compare Call Return return:yes FunctionDef name:re_replacer arg:s arguments arg If BoolOp Call Call Return return:yes Call Return return:yes Assign Call If Compare Assign Call If Compare Assign Call Assign Call" + }, + { + "library": "scikit-learn", + "name": "dtypes", + "source_code": "@cache\ndef dtypes(self, *, device=None, kind=None):\n res = self._dtypes(kind)\n for k, v in res.copy().items():\n try:\n torch.empty((0,), dtype=v, device=device)\n except:\n del res[k]\n return res", + "docstring": "The array API data types supported by PyTorch. Note that this function only returns data types that are defined by the array API. Parameters ---------- device : Device, optional The device to get the data types for. 
Unused for PyTorch, as all devices use the same dtypes. kind : str or tuple of str, optional The kind of data types to return. If ``. Returns ------- dtypes : dict A dictionary mapping the names of data types to the corresponding PyTorch data types. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.dtypes(kind='signed integer') {'int8': numpy.int8, 'int16': numpy.int16, 'int32': numpy.int32, 'int64': numpy.int64}", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\torch\\_info.py", + "ast_data": "FunctionDef name:dtypes arg:self arguments arg arg arg Assign Call For Call Call Try Call ExceptHandler Return return:yes" + }, + { + "library": "pytorch", + "name": "get_default_custom_config_dict", + "source_code": "def get_default_custom_config_dict():\n return _DEFAULT_CUSTOM_CONFIG_DICT", + "docstring": "Defines the default custom config dict.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py", + "ast_data": "FunctionDef name:get_default_custom_config_dict arguments Return return:yes" + }, + { + "library": "numpy", + "name": "openhook", + "source_code": "def openhook(filename, mode):\n if charset_normalizer is not None:\n encoding = charset_normalizer.from_path(filename).best().encoding\n else:\n nbytes = min(32, os.path.getsize(filename))\n with open(filename, 'rb') as fhandle:\n raw = fhandle.read(nbytes)\n if raw.startswith(codecs.BOM_UTF8):\n encoding = 'UTF-8-SIG'\n elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):\n encoding = 'UTF-32'\n elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)):\n encoding = 'UTF-16'\n else:\n encoding = 'ascii'\n return open(filename, mode, encoding=encoding)", + "docstring": "Ensures that filename is opened with correct encoding parameter. This function uses charset_normalizer package, when available, for determining the encoding of the file to be opened. When charset_normalizer is not available, the function detects only UTF encodings, otherwise, ASCII encoding is used as fallback.", + "type": "function", + "file_path": "numpy\\numpy\\f2py\\crackfortran.py", + "ast_data": "FunctionDef name:openhook arg:filename arg:mode arguments arg arg If Compare Assign Call Call Assign Call Call With Call Assign Call If Call Assign If Call Assign If Call Assign Assign Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_rows", + "source_code": "def get_rows(self, infer_nrows: int, skiprows: set[int] | None=None) -> list[str]:\n if skiprows is None:\n skiprows = set()\n buffer_rows = []\n detect_rows = []\n for i, row in enumerate(self.f):\n if i not in skiprows:\n detect_rows.append(row)\n buffer_rows.append(row)\n if len(detect_rows) >= infer_nrows:\n break\n self.buffer = iter(buffer_rows)\n return detect_rows", + "docstring": "Read rows from self.f, skipping as specified. We distinguish buffer_rows (the first <= infer_nrows lines) from the rows returned to detect_colspecs because it's simpler to leave the other locations with skiprows logic alone than to modify them to deal with the fact we skipped some rows here as well. Parameters ---------- infer_nrows : int Number of rows to read from self.f, not counting rows that are skipped. skiprows: set, optional Indices of rows to skip. 
Returns ------- detect_rows : list of str A list containing the rows to read.", + "type": "method", + "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py", + "ast_data": "FunctionDef name:get_rows arg:self arg:infer_nrows arg:skiprows arguments arg arg arg If Compare Assign Call Assign Assign For Call If Compare Call Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "categorical_column_with_identity", + "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.categorical_column_with_identity')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef categorical_column_with_identity(key, num_buckets, default_value=None):\n if num_buckets < 1:\n raise ValueError('num_buckets {} < 1, column_name {}'.format(num_buckets, key))\n if default_value is not None and (default_value < 0 or default_value >= num_buckets):\n raise ValueError('default_value {} not in range [0, {}), column_name {}'.format(default_value, num_buckets, key))\n fc_utils.assert_key_is_string(key)\n return IdentityCategoricalColumn(key=key, number_buckets=num_buckets, default_value=default_value)", + "docstring": "A that returns identity values. Use this when your inputs are integers in the range , and you want to use the input value itself as the categorical ID. Values outside this range will result in if specified, otherwise it will fail. Typically, this is used for contiguous ranges of integer indexes, but it doesn't have to be. This might be inefficient, however, if many of IDs are unused. Consider in that case. For input dictionary , is either or . If , missing values can be represented by for int and for string, which will be dropped by this feature column. In the following examples, each input in the range is assigned the same value. All other inputs are assigned 0. Note that a literal 0 in inputs will result in the same default ID. Linear model: Embedding for a DNN model: Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature objects, and feature columns. num_buckets: Range of inputs and outputs is . default_value: If set, values outside of range will be replaced with this value. If not set, values >= num_buckets will cause a failure while values < 0 will be dropped. Returns: A that returns identity values. Raises: ValueError: if is less than one. 
ValueError: if is not in range .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:categorical_column_with_identity arg:key arg:num_buckets arg:default_value arguments arg arg arg If Compare Raise Call Call If BoolOp Compare BoolOp Compare Compare Raise Call Call Call Return return:yes Call Call Call Call" + }, + { + "library": "pytorch", + "name": "_RecvInfo", + "source_code": "class _RecvInfo:\n\n def __init__(self, input_name: str, source: int, buffer: torch.Tensor):\n self.input_name = input_name\n self.source = source\n self.buffer = buffer\n\n def __repr__(self):\n return f'_RecvInfo(input={self.input_name}, source={self.source}, shape={self.buffer.size()})'", + "docstring": "Represents a stage input.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", + "ast_data": "ClassDef name:_RecvInfo FunctionDef name:__init__ arg:self arg:input_name arg:source arg:buffer arguments arg arg arg arg Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "revoke_token", + "source_code": "def revoke_token(self, token, request):\n raise NotImplementedError()", + "docstring": "Mark token as revoked. Since token MUST be unique, it would be dangerous to delete it. Consider this situation: 1. Jane obtained a token XYZ 2. Jane revoked (deleted) token XYZ 3. Bob generated a new token XYZ 4. Jane can use XYZ to access Bob's resource It would be secure to mark a token as revoked:: def revoke_token(self, token, request): hint = request.form.get(\"token_type_hint\") if hint == \"access_token\": token.access_token_revoked = True else: token.access_token_revoked = True token.refresh_token_revoked = True token.save()", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7009\\revocation.py", + "ast_data": "FunctionDef name:revoke_token arg:self arg:token arg:request arguments arg arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "split", + "source_code": "def split(self, X, y=None, groups=None):\n return super().split(X, y, groups)", + "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", + "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "urljoin_bytes", + "source_code": "def urljoin_bytes(*atoms):\n url = b'/'.join([x for x in atoms if x])\n while b'//' in url:\n url = url.replace(b'//', b'/')\n return url or b'/'", + "docstring": "Return the given path , joined into a single URL. 
This will correctly join a SCRIPT_NAME and PATH_INFO into the original URL, even if either atom is blank.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:urljoin_bytes arguments arg Assign Call While Compare Assign Call Return return:yes BoolOp" + }, + { + "library": "django", + "name": "iri_to_uri", + "source_code": "def iri_to_uri(iri):\n if iri is None:\n return iri\n elif isinstance(iri, Promise):\n iri = str(iri)\n return quote(iri, safe=\"/#%[]=:;$&()+,!?*@'~\")", + "docstring": "Convert an Internationalized Resource Identifier (IRI) portion to a URI portion that is suitable for inclusion in a URL. This is the algorithm from RFC 3987 Section 3.1, slightly simplified since the input is assumed to be a string rather than an arbitrary byte stream. Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or b'/I ♥ Django/') and return a string containing the encoded result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/').", + "type": "function", + "file_path": "django\\django\\utils\\encoding.py", + "ast_data": "FunctionDef name:iri_to_uri arg:iri arguments arg If Compare Return return:yes If Call Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "_describe_identifier", + "source_code": "def _describe_identifier(self, signode: TextElement, identnode: TextElement, env: BuildEnvironment, symbol: Symbol) -> None:\n raise NotImplementedError", + "docstring": "Render the prefix into signode, and the last part into identnode.", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\cpp\\_ast.py", + "ast_data": "FunctionDef name:_describe_identifier arg:self arg:signode arg:identnode arg:env arg:symbol arguments arg arg arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "_handle_col_wise_sharding", + "source_code": "def _handle_col_wise_sharding(input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg):\n gathered_inputs = all_gather(input, group=pg)\n if max_norm is not None:\n local_shard = _handle_max_norm_col_wise(max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg)\n output = _handle_col_wise_sharding_base(torch.nn.functional.embedding, len(input.size()), input, world_size, weight, local_shard, pg, gathered_inputs, padding_idx=padding_idx)\n return (output, local_shard)", + "docstring": "Entry-point function to handle the logic of col-wise sharding of weight for embedding. (Detailed explanations of the logic can be found in the comment for sharded_embedding.) Args: input: list of ID used for lookup and aggregation. world_size: number of ranks. weight: sharded weight tensor. local_shard: col-wise shared local weight used for lookup. max_norm: If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place. norm_type: The p in the p-norm to compute for the max_norm option. padding_idx: If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed \"pad\". pg: process group. 
Returns: final result of lookup.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\embedding.py", + "ast_data": "FunctionDef name:_handle_col_wise_sharding arg:input arg:world_size arg:weight arg:local_shard arg:max_norm arg:norm_type arg:padding_idx arg:pg arguments arg arg arg arg arg arg arg arg Assign Call If Compare Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "dump_node_schedule", + "source_code": "def dump_node_schedule(node_schedule: Sequence[BaseSchedulerNode]) -> None:\n from torch._inductor.codegen.simd import DisableReduction, EnableReduction\n from torch._inductor.scheduler import SchedulerNode\n print(f'Node schedule with {len(node_schedule)} nodes')\n for idx, node in enumerate(node_schedule):\n print(f' {idx:3}:')\n if node is EnableReduction:\n print('enable reduction')\n elif node is DisableReduction:\n print('disable reduction')\n elif isinstance(node, SchedulerNode):\n is_red = node.is_reduction()\n print(f'{('red' if is_red else 'pw')} scheduler node')\n if is_red:\n assert node.node is not None\n print(f'original reduction hint {node.node.data.reduction_hint}')\n print('ReadDep:')\n for dep in node.read_writes.reads:\n print(dep)\n print('WriteDep:')\n for dep in node.read_writes.writes:\n print(dep)\n else:\n raise RuntimeError(f'Unrecognized node type: {type(node)}')", + "docstring": "An API that can be used in pdb to dump a node_schedule. Right mainly dump the read/write dependencies but can add more as needed.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "FunctionDef name:dump_node_schedule arg:node_schedule arguments arg Call Call For Call Call If Compare Call If Compare Call If Call Assign Call Call If Compare Call Call For Call Call For Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "is_sharded", + "source_code": "def is_sharded(self, tensor: Tensor) -> bool:\n if not hasattr(self.flat_param, '_sharded_size') or not self.uses_sharded_strategy:\n return False\n sharded_size = self.flat_param._sharded_size\n return tensor.size() == sharded_size", + "docstring": "Return whether `` for clarity.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:is_sharded arg:self arg:tensor arguments arg arg If BoolOp Call Return return:yes Assign Return return:yes Compare Call" + }, + { + "library": "pandas", + "name": "astype", + "source_code": "def astype(self, dtype: Dtype, copy: bool=True):\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n if self.dtype == dtype:\n return self.copy() if copy else self\n values = self._data\n if isinstance(values, ExtensionArray):\n with rewrite_exception(type(values).__name__, type(self).__name__):\n new_values = values.astype(dtype, copy=copy)\n elif isinstance(dtype, ExtensionDtype):\n cls = dtype.construct_array_type()\n new_values = cls._from_sequence(self, dtype=dtype, copy=copy)\n else:\n new_values = astype_array(values, dtype=dtype, copy=copy)\n result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False)\n if not copy and self._references is not None and astype_is_view(self.dtype, dtype):\n result._references = self._references\n result._references.add_index_reference(result)\n return result", + "docstring": "Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. 
When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer is treated as `dtype`, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. See Also -------- Index.dtype: Return the dtype object of the underlying data. Index.dtypes: Return the dtype object of the underlying data. Index.convert_dtypes: Convert columns to the best possible dtypes. Examples -------- >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.astype(\"float\") Index([1.0, 2.0, 3.0], dtype='float64')", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:astype arg:self arg:dtype arg:copy arguments arg arg arg If Compare Assign Call If Compare Return return:yes Call Assign If Call With Call Call Call Assign Call If Call Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "_save_outputs", + "source_code": "def _save_outputs(self, outputs: Union[Tensor, List[Tensor]], directory: Optional[str]=None, suffix: str='') -> None:\n if directory is None:\n name = f'{self.name}_{datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y%m%d%H%M%S')!s}'\n directory = os.path.join('kornia_outputs', name)\n os.makedirs(directory, exist_ok=True)\n for i, out_image in enumerate(outputs):\n write_image(os.path.join(directory, f'{str(i).zfill(6)}{suffix}.jpg'), out_image.mul(255.0).byte())\n logger.info(f'Outputs are saved in {directory}')", + "docstring": "Save the output image(s) to a directory. Args: outputs: output tensor. directory: directory to save the images. suffix: filename suffix.", + "type": "method", + "file_path": "kornia\\kornia\\models\\base.py", + "ast_data": "FunctionDef name:_save_outputs arg:self arg:outputs arg:directory arg:suffix arguments arg arg arg arg If Compare Assign Call Call Assign Call Call For Call Call Call Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "get", + "source_code": "@staticmethod\ndef get(place):\n return 'DOC_' + place.upper().replace('.', '_')", + "docstring": "Returns the C #definition name of docstring according to ufunc place. 
C #definitions are generated by generate_umath_doc.py in a separate C header.", + "type": "method", + "file_path": "numpy\\numpy\\_core\\code_generators\\generate_umath.py", + "ast_data": "FunctionDef name:get arg:place arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "flow", + "source_code": "@property\ndef flow(self):\n return self._implementation._flow", + "docstring": "The flow forcing ops leading to this TensorArray state.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:flow arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, patch):\n super().__init__(patch.get_path(), patch.get_transform())\n self._patch = patch", + "docstring": "Parameters ---------- patch :", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:patch arguments arg arg Call Call Call Call Assign" + }, + { + "library": "django", + "name": "nodata_value", + "source_code": "@property\ndef nodata_value(self):\n nodata_exists = c_int()\n value = capi.get_band_nodata_value(self._ptr, nodata_exists)\n if not nodata_exists:\n value = None\n elif self.datatype() in GDAL_INTEGER_TYPES:\n value = int(value)\n return value", + "docstring": "Return the nodata value for this band, or None if it isn't set.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py", + "ast_data": "FunctionDef name:nodata_value arg:self arguments arg Assign Call Assign Call If Assign If Compare Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_validate_targets", + "source_code": "def _validate_targets(self, y):\n return column_or_1d(y, warn=True).astype(np.float64, copy=False)", + "docstring": "Validation of y and class_weight. Default implementation for SVR and one-class; overridden in BaseSVC.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\svm\\_base.py", + "ast_data": "FunctionDef name:_validate_targets arg:self arg:y arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_TransposeTridiagonalMatrix", + "source_code": "def _TransposeTridiagonalMatrix(diags):\n diag = diags[..., 1, :]\n if diags.shape.is_fully_defined():\n zeros = array_ops.zeros(list(diags.shape[:-2]) + [1], dtype=diags.dtype)\n superdiag = array_ops.concat((diags[..., 2, 1:], zeros), axis=-1)\n subdiag = array_ops.concat((zeros, diags[..., 0, :-1]), axis=-1)\n else:\n rank = array_ops.rank(diags)\n zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1]])), axis=0)\n superdiag = array_ops.pad(diags[..., 2, 1:], superdiag_pad)\n subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0]])), axis=0)\n subdiag = array_ops.pad(diags[..., 0, :-1], subdiag_pad)\n return array_ops_stack.stack([superdiag, diag, subdiag], axis=-2)", + "docstring": "Transposes a tridiagonal matrix. Args: diags: the diagonals of the input matrix in the compact form (see linalg_ops.tridiagonal_solve). 
Returns: Diagonals of the transposed matrix in the compact form.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_TransposeTridiagonalMatrix arg:diags arguments arg Assign If Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "slugify", + "source_code": "@register.filter(is_safe=True)\n@stringfilter\ndef slugify(value):\n return _slugify(value)", + "docstring": "Convert to ASCII. Convert spaces to hyphens. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace.", + "type": "function", + "file_path": "django\\django\\template\\defaultfilters.py", + "ast_data": "FunctionDef name:slugify arg:value arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "fn_input_signature", + "source_code": "@property\ndef fn_input_signature(self):\n if self._has_kwargs:\n return None\n if None in nest.flatten(self._input_signature):\n return None\n return self._input_signature", + "docstring": "Returns input signature for the wrapped layer call function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", + "ast_data": "FunctionDef name:fn_input_signature arg:self arguments arg If Return return:no If Compare Call Return return:no Return return:yes" + }, + { + "library": "scikit-learn", + "name": "Parallel", + "source_code": "class Parallel(joblib.Parallel):\n\n def __call__(self, iterable):\n config = get_config()\n warning_filters = warnings.filters\n iterable_with_config_and_warning_filters = ((_with_config_and_warning_filters(delayed_func, config, warning_filters), args, kwargs) for delayed_func, args, kwargs in iterable)\n return super().__call__(iterable_with_config_and_warning_filters)", + "docstring": "Tweak of :class: that propagates the scikit-learn configuration. This subclass of :class: ensures that the active configuration (thread-local) of scikit-learn is propagated to the parallel workers for the duration of the execution of the parallel tasks. The API does not change and you can refer to :class: documentation for more details. .. 
versionadded:: 1.3", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\parallel.py", + "ast_data": "ClassDef name:Parallel FunctionDef name:__call__ arg:self arg:iterable arguments arg arg Assign Call Assign Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "get_sortable_by", + "source_code": "def get_sortable_by(self, request):\n return self.sortable_by if self.sortable_by is not None else self.get_list_display(request)", + "docstring": "Hook for specifying which fields can be sorted in the changelist.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_sortable_by arg:self arg:request arguments arg arg Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "_save_model_and_copy_assets", + "source_code": "def _save_model_and_copy_assets(exported_model: exported_model_pb2.ExportedModel, src_saved_model_path: str, dst_saved_model_path: str, signature_def_map: Mapping[str, meta_graph_pb2.SignatureDef], tags: Collection[str]) -> bool:\n save_model.save_model_v1(exported_model.graph_def, dst_saved_model_path, signature_def_map, tags, init_op_name=exported_model.init_node_name, saver_def=_get_saver_def_or_none(exported_model), checkpoint_dir=exported_model.checkpoint_dir, function_aliases=exported_model.function_aliases, asset_file_defs=exported_model.asset_file_defs)\n _copy_assets(src_saved_model_path, dst_saved_model_path)\n return True", + "docstring": "Saves the model and copies the assets from the source model. Args: exported_model: ExportedModel to save. src_saved_model_path: Path to the source SavedModel. This will be used to copy the asset files to . dst_saved_model_path: Destination path to save the exported model. signature_def_map: Signature key -> SignatureDef mapping. tags: Tags to attach to the saved MetaGraphDef. Returns: upon successfully saving the model.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py", + "ast_data": "FunctionDef name:_save_model_and_copy_assets arg:exported_model arg:src_saved_model_path arg:dst_saved_model_path arg:signature_def_map arg:tags arguments arg arg arg arg arg Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "loads", + "source_code": "def loads(s, encode_nominal=False, return_type=DENSE):\n decoder = ArffDecoder()\n return decoder.decode(s, encode_nominal=encode_nominal, return_type=return_type)", + "docstring": "Convert a string instance containing the ARFF document into a Python object. :param s: a string object. :param encode_nominal: boolean, if True perform a label encoding while reading the .arff file. :param return_type: determines the data structure used to store the dataset. Can be one of , , , or . Consult the sections on _ and _. 
:return: a dictionary.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\_arff.py", + "ast_data": "FunctionDef name:loads arg:s arg:encode_nominal arg:return_type arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "to_dense", + "source_code": "def to_dense(self):\n ret = np.zeros([self.n, self.m], dtype=np.float64)\n nvals = self.vals.size\n for i in range(nvals):\n ret[self.rows[i], self.cols[i]] += self.vals[i]\n return ret", + "docstring": "Return a dense matrix representing self, mainly for debugging purposes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:to_dense arg:self arguments arg Assign Call Assign For Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_canonical_name_meson", + "source_code": "def get_canonical_name_meson(target, build_path):\n assert len(target['filename']) == 1\n shared_library_path = Path(target['filename'][0])\n shared_library_relative_path = shared_library_path.relative_to(build_path.absolute())\n rel_path = shared_library_relative_path.as_posix()\n pattern = '\\\\.(cpython|cp\\\\d+)-.+'\n return re.sub(pattern, '', str(rel_path))", + "docstring": "Return a name based on generated shared library. The goal is to return a name that can be easily matched with the output from . Look at docstring to see what looks like.", + "type": "function", + "file_path": "scikit-learn\\build_tools\\check-meson-openmp-dependencies.py", + "ast_data": "FunctionDef name:get_canonical_name_meson arg:target arg:build_path arguments arg arg Compare Call Assign Call Assign Call Call Assign Call Assign Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "groups", + "source_code": "def groups(self) -> list:\n _tables()\n self._check_if_open()\n assert self._handle is not None\n assert _table_mod is not None\n return [g for g in self._handle.walk_groups() if not isinstance(g, _table_mod.link.Link) and (getattr(g._v_attrs, 'pandas_type', None) or getattr(g, 'table', None) or (isinstance(g, _table_mod.table.Table) and g._v_name != 'table'))]", + "docstring": "Return a list of all the top-level nodes. Each node returned is not a pandas storage object. Returns ------- list List of objects. See Also -------- HDFStore.get_node : Returns the node with the key. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=[\"A\", \"B\"]) >>> store = pd.HDFStore(\"store.h5\", \"w\") # doctest: +SKIP >>> store.put(\"data\", df) # doctest: +SKIP >>> print(store.groups()) # doctest: +SKIP >>> store.close() # doctest: +SKIP [/data (Group) '' children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array), 'block0_items' (Array)]]", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:groups arg:self arguments arg Call Call Compare Compare Return return:yes Call BoolOp Call BoolOp Call Call BoolOp Call Compare" + }, + { + "library": "scipy", + "name": "pointbiserialr", + "source_code": "@_axis_nan_policy_factory(_pack_CorrelationResult, n_samples=2, result_to_tuple=_unpack_CorrelationResult, paired=True, too_small=1, n_outputs=3)\ndef pointbiserialr(x, y):\n rpb, prob = pearsonr(x, y)\n res = SignificanceResult(rpb, prob)\n res.correlation = rpb\n return res", + "docstring": "Calculate a point biserial correlation coefficient and its p-value. 
The point biserial correlation is used to measure the relationship between a binary variable, x, and a continuous variable, y. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply a determinative relationship. This function may be computed using a shortcut formula but produces the same result as . Parameters ---------- x : array_like of bools Input array. y : array_like Input array. Returns ------- res: SignificanceResult An object containing attributes: statistic : float The R value. pvalue : float The two-sided p-value. Notes ----- uses a t-test with `pearsonr\\overline{Y_{0}}\\overline{Y_{1}}N_{0}N_{1}Ns_{y}r_{pb}N-2r_{pb}r_{pb}10.1002/9781118445112.stat06227` Examples -------- >>> import numpy as np >>> from scipy import stats >>> a = np.array([0, 0, 0, 1, 1, 1, 1]) >>> b = np.arange(7) >>> stats.pointbiserialr(a, b) (0.8660254037844386, 0.011724811003954652) >>> stats.pearsonr(a, b) (0.86602540378443871, 0.011724811003954626) >>> np.corrcoef(a, b) array([[ 1. , 0.8660254], [ 0.8660254, 1. ]])", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:pointbiserialr arg:x arg:y arguments arg arg Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_changed_params", + "source_code": "def _changed_params(estimator):\n params = estimator.get_params(deep=False)\n init_func = getattr(estimator.__init__, 'deprecated_original', estimator.__init__)\n init_params = inspect.signature(init_func).parameters\n init_params = {name: param.default for name, param in init_params.items()}\n\n def has_changed(k, v):\n if k not in init_params:\n return True\n if init_params[k] == inspect._empty:\n return True\n if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:\n return True\n if repr(v) != repr(init_params[k]) and (not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):\n return True\n return False\n return {k: v for k, v in params.items() if has_changed(k, v)}", + "docstring": "Return dict (param_name: value) of parameters that were given to estimator with non-default values.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_pprint.py", + "ast_data": "FunctionDef name:_changed_params arg:estimator arguments arg Assign Call Assign Call Assign Call Assign Call FunctionDef name:has_changed arg:k arg:v arguments arg arg If Compare Return return:yes If Compare Return return:yes If BoolOp Call Compare Return return:yes If BoolOp Compare Call Call BoolOp Call Call Return return:yes Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_or_create_variables_dir", + "source_code": "def get_or_create_variables_dir(export_dir):\n variables_dir = get_variables_dir(export_dir)\n file_io.recursive_create_dir(variables_dir)\n return variables_dir", + "docstring": "Return variables sub-directory, or create one if it doesn't exist.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py", + "ast_data": "FunctionDef name:get_or_create_variables_dir arg:export_dir arguments arg Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "factorize_array", + "source_code": "def factorize_array(values: np.ndarray, use_na_sentinel: bool=True, size_hint: int | None=None, na_value: object=None, mask: npt.NDArray[np.bool_] | None=None) -> tuple[npt.NDArray[np.intp], np.ndarray]:\n original = 
values\n if values.dtype.kind in 'mM':\n na_value = iNaT\n hash_klass, values = _get_hashtable_algo(values)\n table = hash_klass(size_hint or len(values))\n uniques, codes = table.factorize(values, na_sentinel=-1, na_value=na_value, mask=mask, ignore_na=use_na_sentinel)\n uniques = _reconstruct_data(uniques, original.dtype, original)\n codes = ensure_platform_int(codes)\n return (codes, uniques)", + "docstring": "Factorize a numpy array to codes and uniques. This doesn't do any coercion of types or unboxing before factorization. Parameters ---------- values : ndarray use_na_sentinel : bool, default True If True, the sentinel -1 will be used for NaN values. If False, NaN values will be encoded as non-negative integers and will not drop the NaN from the uniques of the values. size_hint : int, optional Passed through to the hashtable's 'get_labels' method na_value : object, optional A value in to consider missing. Note: only use this parameter when you know that you don't have any values pandas would consider missing in the array (NaN for float data, iNaT for datetimes, etc.). mask : ndarray[bool], optional If not None, the mask is used as indicator for missing values (True = missing, False = valid) instead of or condition \"val != val\". Returns ------- codes : ndarray[np.intp] uniques : ndarray", + "type": "function", + "file_path": "pandas\\pandas\\core\\algorithms.py", + "ast_data": "FunctionDef name:factorize_array arg:values arg:use_na_sentinel arg:size_hint arg:na_value arg:mask arguments arg arg arg arg arg Assign If Compare Assign Assign Call Assign Call BoolOp Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_is_input_large_scalar", + "source_code": "def _is_input_large_scalar(node: Node, gm: torch.fx.GraphModule):\n if node.op == 'get_attr':\n qualified_name = str(node.target)\n module_path, _, name = qualified_name.rpartition('.')\n submod = gm.get_submodule(module_path)\n tensor = getattr(submod, name)\n HISTC_UPPER_BOUND = 3402823500000000.0\n return tensor.numel() == 1 and abs(tensor.item()) > HISTC_UPPER_BOUND\n return False", + "docstring": "Check if input is a large scalar value. 
So that we can skip quantization for the node since histc op (in HistogramObserver) only works for values up to certain upper bound", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer_utils.py", + "ast_data": "FunctionDef name:_is_input_large_scalar arg:node arg:gm arguments arg arg If Compare Assign Call Assign Call Assign Call Assign Call Assign Return return:yes BoolOp Compare Call Compare Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "composite_images", + "source_code": "def composite_images(images, renderer, magnification=1.0):\n if len(images) == 0:\n return (np.empty((0, 0, 4), dtype=np.uint8), 0, 0)\n parts = []\n bboxes = []\n for image in images:\n data, x, y, trans = image.make_image(renderer, magnification)\n if data is not None:\n x *= magnification\n y *= magnification\n parts.append((data, x, y, image._get_scalar_alpha()))\n bboxes.append(Bbox([[x, y], [x + data.shape[1], y + data.shape[0]]]))\n if len(parts) == 0:\n return (np.empty((0, 0, 4), dtype=np.uint8), 0, 0)\n bbox = Bbox.union(bboxes)\n output = np.zeros((int(bbox.height), int(bbox.width), 4), dtype=np.uint8)\n for data, x, y, alpha in parts:\n trans = Affine2D().translate(x - bbox.x0, y - bbox.y0)\n _image.resample(data, output, trans, _image.NEAREST, resample=False, alpha=alpha)\n return (output, bbox.x0 / magnification, bbox.y0 / magnification)", + "docstring": "Composite a number of RGBA images into one. The images are composited in the order in which they appear in the *images* list. Parameters ---------- images : list of Images Each must have a method. For each image, should return , though this is not enforced by this function. Each image must have a purely affine transformation with no shear. renderer : magnification : float, default: 1 The additional magnification to apply for the renderer in use. Returns ------- image : (M, N, 4) array The composited RGBA image. offset_x, offset_y : float The (left, bottom) offset where the composited image should be placed in the output figure.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:composite_images arg:images arg:renderer arg:magnification arguments arg arg arg If Compare Call Return return:yes Call Assign Assign For Assign Call If Compare Call Call Call Call If Compare Call Return return:yes Call Assign Call Assign Call Call Call For Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_pretty_print_target", + "source_code": "@staticmethod\ndef _pretty_print_target(target: object) -> str:\n if isinstance(target, str):\n return target\n if hasattr(target, '__module__'):\n name = getattr(target, '__name__', None)\n if name is None:\n return _get_qualified_name(target)\n if target.__module__ == 'builtins':\n return f'builtins.{name}'\n elif target.__module__ == '_operator':\n return f'operator.{name}'\n return _get_qualified_name(target)", + "docstring": "Make target printouts more user-friendly. 1) builtins will be printed as 2) operators will be printed as 3) other callables will be printed with qualified name, e.g. 
torch.add", + "type": "method", + "file_path": "pytorch\\torch\\fx\\node.py", + "ast_data": "FunctionDef name:_pretty_print_target arg:target arguments arg If Call Return return:yes If Call Assign Call If Compare Return return:yes Call If Compare Return return:yes If Compare Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "bucketize", + "source_code": "def bucketize(self, values: T, boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: T, indexing_dtype: torch.dtype, right: bool, sorter: Optional[tuple[str, sympy.Expr]]=None, sorter_indices: Optional[T]=None) -> None:\n self._reads.add(StarDep(boundaries[0]))\n if sorter is not None:\n self._reads.add(StarDep(sorter[0]))", + "docstring": "Records the names of the buffers that bucketize will read from.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\dependencies.py", + "ast_data": "FunctionDef name:bucketize arg:self arg:values arg:boundaries arg:boundary_indices arg:indexing_dtype arg:right arg:sorter arg:sorter_indices arguments arg arg arg arg arg arg arg arg Call Call If Compare Call Call" + }, + { + "library": "scipy", + "name": "logsf", + "source_code": "def logsf(self, k, *args, **kwds):\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n _a, _b = self._get_support(*args)\n k = asarray(k - loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = (k < _a) & cond0\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(-inf)\n place(output, 1 - cond0 + np.isnan(k), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *(k,) + args)\n place(output, cond, self._logsf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output", + "docstring": "Log of the survival function of the given RV. Returns the log of the \"survival function,\" defined as 1 - , evaluated at . Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). 
Returns ------- logsf : ndarray Log of the survival function evaluated at .", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:logsf arg:self arg:k arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare Assign Compare Assign Assign Call Call Call Call Call Call If Call Assign Call Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_get_estimator", + "source_code": "def _get_estimator(self):\n if self.estimator is None:\n return DecisionTreeRegressor()\n return self.estimator", + "docstring": "Resolve which estimator to return (default is DecisionTreeClassifier)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py", + "ast_data": "FunctionDef name:_get_estimator arg:self arguments arg If Compare Return return:yes Call Return return:yes" + }, + { + "library": "virtualenv", + "name": "reset", + "source_code": "def reset(self):\n pass", + "docstring": "This is a temporary folder, is already empty to start with.", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\app_data\\via_tempdir.py", + "ast_data": "FunctionDef name:reset arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "set_global_generator", + "source_code": "@tf_export('random.set_global_generator', 'random.experimental.set_global_generator')\ndef set_global_generator(generator):\n global global_generator\n global_generator = generator", + "docstring": "Replaces the global generator with another object. This function replaces the global generator with the provided object. A random number generator utilizes a object to store its state. The user shall be aware of caveats how interacts with : - tf.function puts restrictions on Variable creation thus one cannot freely create a new random generator instance inside . To call inside , the generator instance must have already been created eagerly. - tf.function captures the Variable during trace-compilation, thus a compiled f.function will not be affected as demonstrated by random_test.py/RandomTest.testResetGlobalGeneratorBadWithDefun . For most use cases, avoid calling after program initialization, and prefer to reset the state of the existing global generator instead, such as, >>> rng = tf.random.get_global_generator() >>> rng.reset_from_seed(30) Args: generator: the new object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:set_global_generator arg:generator arguments arg Assign Call" + }, + { + "library": "tensorflow", + "name": "dropout", + "source_code": "def dropout(inputs, rate=0.5, noise_shape=None, seed=None, training=False, name=None):\n warnings.warn('`tf.layers.dropout` is deprecated and will be removed in a future version. Please use `tf.keras.layers.Dropout` instead.')\n layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)\n return layer.apply(inputs, training=training)", + "docstring": "Applies Dropout to the input. Dropout consists in randomly setting a fraction of input units to 0 at each update during training time, which helps prevent overfitting. The units that are kept are scaled by , so that their sum is unchanged at training time and inference time. Args: inputs: Tensor input. rate: The dropout rate, between 0 and 1. E.g. \"rate=0.1\" would drop out 10% of input units. 
noise_shape: 1D tensor of type representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape , and you want the dropout mask to be the same for all timesteps, you can use . seed: A Python integer. Used to create random seeds. See for behavior. training: Either a Python boolean, or a TensorFlow boolean scalar tensor (e.g. a placeholder). Whether to return the output in training mode (apply dropout) or in inference mode (return the input untouched). name: The name of the layer (string). Returns: Output tensor. Raises: ValueError: if eager execution is enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\core.py", + "ast_data": "FunctionDef name:dropout arg:inputs arg:rate arg:noise_shape arg:seed arg:training arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "get_meson_info", + "source_code": "def get_meson_info():\n build_path = Path('build/introspect')\n subprocess.check_call(['meson', 'setup', build_path, '--reconfigure'])\n json_out = subprocess.check_output(['meson', 'introspect', build_path, '--targets'], text=True)\n target_list = json.loads(json_out)\n meson_targets = [target for target in target_list if has_openmp_flags(target)]\n return [get_canonical_name_meson(each, build_path) for each in meson_targets]", + "docstring": "Return names of extension that use OpenMP based on meson introspect output. The meson introspect json info is a list of targets where a target is a dict that looks like this (parts not used in this script are not shown for simplicity): { 'name': '_k_means_elkan.cpython-312-x86_64-linux-gnu', 'filename': [ '/sklearn/cluster/_k_means_elkan.cpython-312-x86_64-linux-gnu.so' ], 'target_sources': [ { 'compiler': ['ccache', 'cc'], 'parameters': [ '-Wall', '-std=c11', '-fopenmp', ... ], ... }, { 'linker': ['cc'], 'parameters': [ '-shared', '-fPIC', '-fopenmp', ... ] } ] }", + "type": "function", + "file_path": "scikit-learn\\build_tools\\check-meson-openmp-dependencies.py", + "ast_data": "FunctionDef name:get_meson_info arguments Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "boxcox_normplot", + "source_code": "def boxcox_normplot(x, la, lb, plot=None, N=80):\n return _normplot('boxcox', x, la, lb, plot, N)", + "docstring": "Compute parameters for a Box-Cox normality plot, optionally show it. A Box-Cox normality plot shows graphically what the best transformation parameter is to use in to obtain a distribution that is close to normal. Parameters ---------- x : array_like Input array. 
la, lb : scalar The lower and upper bounds for the `boxcoxplotmatplotlib.pyplotlalbprobplotxplotboxcox_normplotprobplot` and plot it in the same plot: >>> _, maxlog = stats.boxcox(x) >>> ax.axvline(maxlog, color='r') >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_morestats.py", + "ast_data": "FunctionDef name:boxcox_normplot arg:x arg:la arg:lb arg:plot arg:N arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "last_executed_query", + "source_code": "def last_executed_query(self, cursor, sql, params):\n\n def to_string(s):\n return force_str(s, strings_only=True, errors='replace')\n if isinstance(params, (list, tuple)):\n u_params = tuple((to_string(val) for val in params))\n elif params is None:\n u_params = ()\n else:\n u_params = {to_string(k): to_string(v) for k, v in params.items()}\n return 'QUERY = %r - PARAMS = %r' % (sql, u_params)", + "docstring": "Return a string of the query last executed by the given cursor, with placeholders replaced with actual values. is the raw query containing placeholders and is the sequence of parameters. These are used by default, but this method exists for database backends to provide a better implementation according to their own quoting schemes.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:last_executed_query arg:self arg:cursor arg:sql arg:params arguments arg arg arg arg FunctionDef name:to_string arg:s arguments arg Return return:yes Call If Call Assign Call Call If Compare Assign Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_TrainingTarget", + "source_code": "class _TrainingTarget(object):\n\n def __init__(self, target, feedable=False, skip_target_weights=True):\n self._target = target\n self._feedable = feedable\n self._skip_target_weights = skip_target_weights\n\n @property\n def target(self):\n return self._target\n\n @property\n def feedable(self):\n return self._feedable\n\n @property\n def skip_target_weights(self):\n return self._skip_target_weights", + "docstring": "Container for a target tensor (y_true) and its metadata (shape, loss...). Args: target: A target tensor for the model. It may be if the output is excluded from loss computation. It is still kept as None since each output of the model should have a corresponding target. If the target is None, the rest of the attributes will be None as well. feedable: Boolean, whether the target is feedable (requires data to be passed in or ), or not (model compiled with argument). skip_target_weights: Boolean, whether the target should be skipped during weights calculation.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "ClassDef name:_TrainingTarget FunctionDef name:__init__ arg:self arg:target arg:feedable arg:skip_target_weights arguments arg arg arg arg Assign Assign Assign FunctionDef name:target arg:self arguments arg Return return:yes FunctionDef name:feedable arg:self arguments arg Return return:yes FunctionDef name:skip_target_weights arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "Basic", + "source_code": "class Basic(NoValue):\n QN = 'Qualified name, as it appeared in the code. 
See qual_names.py.'\n SKIP_PROCESSING = 'This node should be preserved as is and not processed any further.'\n INDENT_BLOCK_REMAINDER = 'When a node is annotated with this, the remainder of the block should be indented below it. The annotation contains a tuple (new_body, name_map), where `new_body` is the new indented block and `name_map` allows renaming symbols.'\n ORIGIN = 'Information about the source code that converted code originated from. See origin_information.py.'\n DIRECTIVES = 'User directives associated with a statement or a variable. Typically, they affect the immediately-enclosing statement.'\n EXTRA_LOOP_TEST = 'A special annotation containing additional test code to be executed in for loops.'", + "docstring": "Container for basic annotation keys. The enum values are used strictly for documentation purposes.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\anno.py", + "ast_data": "ClassDef name:Basic Assign Assign Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "get_partition_cudagraph_metadata", + "source_code": "def get_partition_cudagraph_metadata(partition_map: GraphPartitionMap, metadata: CudagraphMetadata) -> CudagraphMetadata:\n partition_placeholders = []\n partition_static_input_idxs: OrderedSet[int] = OrderedSet()\n partition_mutated_input_idxs: OrderedSet[int] = OrderedSet()\n for partition_input_idx, graph_input_idx in enumerate(partition_map.input_index_mapping):\n if graph_input_idx in metadata.static_input_idxs:\n partition_static_input_idxs.add(partition_input_idx)\n if graph_input_idx in metadata.mutated_input_idxs:\n partition_mutated_input_idxs.add(partition_input_idx)\n if graph_input_idx is not None:\n placeholder = metadata.placeholders[graph_input_idx]\n else:\n placeholder = PlaceholderInfo(name=f'partition_{partition_map.id}_placeholder_{partition_input_idx}', stack_trace=None, users=[], mutating_use_stack_trace=None)\n partition_placeholders.append(placeholder)\n partition_stack_traces = []\n for graph_output_idx in partition_map.output_index_mapping:\n if graph_output_idx is not None:\n partition_stack_traces.append(metadata.stack_traces[graph_output_idx])\n else:\n partition_stack_traces.append(None)\n partition_constants = {name: metadata.constants[name] for name in partition_map.constant_names}\n return CudagraphMetadata(partition_placeholders, partition_static_input_idxs, partition_mutated_input_idxs, partition_stack_traces, partition_constants)", + "docstring": "Convert the cudagraph metadata at the graph level to the graph partition level, given the graph partition info (i.e., mapping from partition input/output index to graph input/output index).", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py", + "ast_data": "FunctionDef name:get_partition_cudagraph_metadata arg:partition_map arg:metadata arguments arg arg Assign Call Call For Call If Compare Call If Compare Call If Compare Assign Assign Call Call Assign For If Compare Call Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "OptimizerSingleTensorPattern", + "source_code": "class OptimizerSingleTensorPattern(Pattern):\n\n def __init__(self, prof: profile, should_benchmark: bool=False):\n super().__init__(prof, should_benchmark)\n self.name = 'Optimizer Single Tensor Pattern'\n self.optimizers_with_foreach = ['adam', 'sgd', 'adamw']\n self.description = \"Deteced optimizer running with single tensor implementation. 
Please enable multi tensor implementation by passing 'foreach=True' into optimizer.\"\n self.url = ''\n\n def match(self, event: _ProfilerEvent):\n for optimizer in self.optimizers_with_foreach:\n if event.name.endswith(f'_single_tensor_{optimizer}'):\n return True\n return False", + "docstring": "This pattern identifies if we are using the single-tensor version of an optimizer. example: optimizer = torch.optim.SGD(model.parameters(), lr=0.1) By adding foreach=True to enable multi-tensor optimizer, we can gain speedup when the kernels are relatively small. Pattern: XXXXX: _single_tenser_ Algorithm: String match", + "type": "class", + "file_path": "pytorch\\torch\\profiler\\_pattern_matcher.py", + "ast_data": "ClassDef name:OptimizerSingleTensorPattern FunctionDef name:__init__ arg:self arg:prof arg:should_benchmark arguments arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:match arg:self arg:event arguments arg arg For If Call Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "_deprecated", + "source_code": "def _deprecated(msg, stacklevel=2):\n\n def wrap(fun):\n if isinstance(fun, type):\n warnings.warn(f'Trying to deprecate class {fun!r}', category=RuntimeWarning, stacklevel=2)\n return fun\n\n @functools.wraps(fun)\n def call(*args, **kwargs):\n warnings.warn(msg, category=DeprecationWarning, stacklevel=stacklevel)\n return fun(*args, **kwargs)\n call.__doc__ = fun.__doc__\n return call\n return wrap", + "docstring": "Deprecate a function by emitting a warning on use.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\deprecation.py", + "ast_data": "FunctionDef name:_deprecated arg:msg arg:stacklevel arguments arg arg FunctionDef name:wrap arg:fun arguments arg If Call Call Return return:yes FunctionDef name:call arguments arg arg Call Return return:yes Call Call Assign Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "max_x", + "source_code": "@property\ndef max_x(self):\n return self._envelope.MaxX", + "docstring": "Return the value of the maximum X coordinate.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py", + "ast_data": "FunctionDef name:max_x arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_scale_norm", + "source_code": "def _scale_norm(self, norm, vmin, vmax, A):\n if vmin is not None or vmax is not None:\n self.set_clim(vmin, vmax)\n if isinstance(norm, colors.Normalize):\n raise ValueError('Passing a Normalize instance simultaneously with vmin/vmax is not supported. Please pass vmin/vmax directly to the norm when creating it.')\n self.autoscale_None(A)", + "docstring": "Helper for initial scaling. Used by public functions that create a ScalarMappable and support parameters *vmin*, *vmax* and *norm*. This makes sure that a *norm* will take precedence over *vmin*, *vmax*. 
Note that this method does not set the norm.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py", + "ast_data": "FunctionDef name:_scale_norm arg:self arg:norm arg:vmin arg:vmax arg:A arguments arg arg arg arg arg If BoolOp Compare Compare Call If Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "clear", + "source_code": "def clear(self):\n self.mismatch_error = None\n self.pt_outs = None\n self._onnx_graph = None\n self.upper_graph_info = None\n self.lower_graph_info = None", + "docstring": "Clear states and results of previous verification.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg Assign Assign Assign Assign Assign" + }, + { + "library": "pandas", + "name": "select_column", + "source_code": "def select_column(self, key: str, column: str, start: int | None=None, stop: int | None=None):\n tbl = self.get_storer(key)\n if not isinstance(tbl, Table):\n raise TypeError('can only read_column with a table')\n return tbl.read_column(column=column, start=start, stop=stop)", + "docstring": "return a single column from the table. This is generally only useful to select an indexable .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the \"fixed\" format. Loading pickled data received from untrusted sources can be unsafe. See: for more. Parameters ---------- key : str column : str The column of interest. start : int or None, default None stop : int or None, default None Raises ------ raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block)", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:select_column arg:self arg:key arg:column arg:start arg:stop arguments arg arg arg arg arg Assign Call If Call Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_bbox_to_anchor", + "source_code": "def get_bbox_to_anchor(self):\n if self._bbox_to_anchor is None:\n return self.axes.bbox\n else:\n transform = self._bbox_to_anchor_transform\n if transform is None:\n return self._bbox_to_anchor\n else:\n return TransformedBbox(self._bbox_to_anchor, transform)", + "docstring": "Return the bbox that the box is anchored to.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:get_bbox_to_anchor arg:self arguments arg If Compare Return return:yes Assign If Compare Return return:yes Return return:yes Call" + }, + { + "library": "kornia", + "name": "euler_from_quaternion", + "source_code": "def euler_from_quaternion(w: Tensor, x: Tensor, y: Tensor, z: Tensor) -> tuple[Tensor, Tensor, Tensor]:\n KORNIA_CHECK(w.shape == x.shape)\n KORNIA_CHECK(x.shape == y.shape)\n KORNIA_CHECK(y.shape == z.shape)\n yy = y * y\n sinr_cosp = 2.0 * (w * x + y * z)\n cosr_cosp = 1.0 - 2.0 * (x * x + yy)\n roll = sinr_cosp.atan2(cosr_cosp)\n sinp = 2.0 * (w * y - z * x)\n sinp = sinp.clamp(min=-1.0, max=1.0)\n pitch = sinp.asin()\n siny_cosp = 2.0 * (w * z + x * y)\n cosy_cosp = 1.0 - 2.0 * (yy + z * z)\n yaw = siny_cosp.atan2(cosy_cosp)\n return (roll, pitch, yaw)", + "docstring": "Convert a quaternion coefficients to Euler angles. Returned angles are in radians in XYZ convention. Args: w: quaternion :math: coefficient. x: quaternion :math: coefficient. 
y: quaternion :math: coefficient. z: quaternion :math: coefficient. Return: A tuple with euler angles, , .", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:euler_from_quaternion arg:w arg:x arg:y arg:z arguments arg arg arg arg Call Compare Call Compare Call Compare Assign Assign Assign Assign Call Assign Assign Call Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "OrderedDictWrapper", + "source_code": "class OrderedDictWrapper:\n\n def __init__(self, cpp_module, attr):\n self.cpp_module = cpp_module\n self.attr = attr\n\n @property\n def cpp_dict(self):\n return getattr(self.cpp_module, self.attr)\n\n def items(self):\n return self.cpp_dict.items()\n\n def keys(self):\n return self.cpp_dict.keys()\n\n def values(self):\n return self.cpp_dict.values()\n\n def __iter__(self):\n return self.cpp_dict.__iter__()\n\n def __len__(self):\n return self.cpp_dict.__len__()\n\n def __contains__(self, key):\n return self.cpp_dict.__contains__(key)\n\n def __getitem__(self, key):\n return self.cpp_dict.__getitem__(key)", + "docstring": "A wrapper around a C++ OrderedDict. It dynamically evaluates the OrderedDict getter on a bound C++ module, such that new changes on the C++ side are picked up. Otherwise accessing e.g. `` so using properties does not work.", + "type": "class", + "file_path": "pytorch\\torch\\nn\\cpp.py", + "ast_data": "ClassDef name:OrderedDictWrapper FunctionDef name:__init__ arg:self arg:cpp_module arg:attr arguments arg arg arg Assign Assign FunctionDef name:cpp_dict arg:self arguments arg Return return:yes Call FunctionDef name:items arg:self arguments arg Return return:yes Call FunctionDef name:keys arg:self arguments arg Return return:yes Call FunctionDef name:values arg:self arguments arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "trace_save_and_restore", + "source_code": "def trace_save_and_restore(obj):\n legacy_name = saveable_compat.get_saveable_name(obj)\n obj_save_fn = obj._serialize_to_tensors\n obj_restore_fn = obj._restore_from_tensors\n if isinstance(obj_save_fn, defun.ConcreteFunction):\n concrete_save = obj_save_fn\n else:\n\n @def_function.function\n def save_fn():\n tensor_dict = obj_save_fn()\n if any((isinstance(v, tensor_callable.Callable) for v in tensor_dict.values())):\n raise NotImplementedError(f'Unable to export SavedModel with object of type {type(obj)} because it returns a Callable in `_serialize_to_tensors`. 
If you need this functionality please file a feature request.')\n if legacy_name:\n return {f'{legacy_name}{key}': value for key, value in tensor_dict.items()}\n return tensor_dict\n concrete_save = save_fn.get_concrete_function()\n if isinstance(obj_restore_fn, defun.ConcreteFunction):\n concrete_restore = obj_restore_fn\n else:\n\n @def_function.function\n def restore_fn(restored_tensors):\n if legacy_name:\n restored_tensors = {key[len(legacy_name):]: value for key, value in restored_tensors.items()}\n obj_restore_fn(restored_tensors)\n concrete_restore = restore_fn.get_concrete_function(concrete_save.structured_outputs)\n return (concrete_save, concrete_restore)", + "docstring": "Traces serialize- and restore-from-tensors functions. Args: obj: A object. Returns: A concrete Function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\tracing_utils.py", + "ast_data": "FunctionDef name:trace_save_and_restore arg:obj arguments arg Assign Call Assign Assign If Call Assign FunctionDef name:save_fn arguments Assign Call If Call Call Call Raise Call Call If Return return:yes Call Return return:yes Assign Call If Call Assign FunctionDef name:restore_fn arg:restored_tensors arguments arg If Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "differentiable_polynomial_floor", + "source_code": "def differentiable_polynomial_floor(input: Tensor) -> Tensor:\n input_floor = input.floor()\n output: Tensor = input_floor + (input - 0.5 - input_floor) ** 3\n return output", + "docstring": "Perform floor via a differentiable operation. Args: input (Tensor): Input tensor of any shape to be floored. Returns: output (Tensor): Pseudo rounded tensor of the same shape as input tensor.", + "type": "function", + "file_path": "kornia\\kornia\\utils\\misc.py", + "ast_data": "FunctionDef name:differentiable_polynomial_floor arg:input arguments arg Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "should_extension_dispatch", + "source_code": "def should_extension_dispatch(left: ArrayLike, right: Any) -> bool:\n return isinstance(left, ABCExtensionArray) or isinstance(right, ABCExtensionArray)", + "docstring": "Identify cases where Series operation should dispatch to ExtensionArray method. Parameters ---------- left : np.ndarray or ExtensionArray right : object Returns ------- bool", + "type": "function", + "file_path": "pandas\\pandas\\core\\ops\\dispatch.py", + "ast_data": "FunctionDef name:should_extension_dispatch arg:left arg:right arguments arg arg Return return:yes BoolOp Call Call" + }, + { + "library": "pandas", + "name": "putmask", + "source_code": "def putmask(self, mask, value: MultiIndex) -> MultiIndex:\n mask, noop = validate_putmask(self, mask)\n if noop:\n return self.copy()\n if len(mask) == len(value):\n subset = value[mask].remove_unused_levels()\n else:\n subset = value.remove_unused_levels()\n new_levels = []\n new_codes = []\n for i, (value_level, level, level_codes) in enumerate(zip(subset.levels, self.levels, self.codes)):\n new_level = level.union(value_level, sort=False)\n value_codes = new_level.get_indexer_for(subset.get_level_values(i))\n new_code = ensure_int64(level_codes)\n new_code[mask] = value_codes\n new_levels.append(new_level)\n new_codes.append(new_code)\n return MultiIndex(levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False)", + "docstring": "Return a new MultiIndex of the values set with the mask. 
Parameters ---------- mask : array like value : MultiIndex Must either be the same length as self or length one Returns ------- MultiIndex", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:putmask arg:self arg:mask arg:value arguments arg arg arg Assign Call If Return return:yes Call If Compare Call Call Assign Call Assign Call Assign Assign For Call Call Assign Call Assign Call Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "format_all", + "source_code": "@staticmethod\ndef format_all(tbs):\n import torch._C._profiler\n rs: list[Optional[list[str]]] = []\n delayed_idxs = []\n for i, tb in enumerate(tbs):\n if tb.tb is None:\n rs.append([])\n else:\n rs.append(None)\n delayed_idxs.append(i)\n torch._C._profiler.symbolize_tracebacks([tbs[i].tb for i in delayed_idxs])\n for i in delayed_idxs:\n rs[i] = traceback.format_list(tbs[i].summary())\n return rs", + "docstring": "Bulk version of CapturedTraceback.format. Returns a list of list of strings.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\_traceback.py", + "ast_data": "FunctionDef name:format_all arg:tbs arguments arg Assign For Call If Compare Call Call Call Call For Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "mem_get_info", + "source_code": "def mem_get_info(device: _device_t=None) -> tuple[int, int]:\n device = _get_device_index(device, optional=True)\n return torch._C._xpu_getMemoryInfo(device)", + "docstring": "Return the global free and total GPU memory for a given device. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default). Returns: int: the memory available on the device in units of bytes. 
int: the total memory on the device in units of bytes", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\memory.py", + "ast_data": "FunctionDef name:mem_get_info arg:device arguments arg Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n self.infer_axes()\n s = self.shape\n if s is not None:\n if isinstance(s, (list, tuple)):\n jshape = ','.join([pprint_thing(x) for x in s])\n s = f'[{jshape}]'\n return f'{self.pandas_type:12.12} (shape->{s})'\n return self.pandas_type", + "docstring": "return a pretty representation of myself", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Call Assign If Compare If Call Assign Call Call Assign Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "fix_node_def", + "source_code": "def fix_node_def(node_def, functions, shared_name_suffix):\n if node_def.op in functions:\n node_def.op = functions[node_def.op].name\n for _, attr_value in node_def.attr.items():\n if attr_value.WhichOneof('value') == 'func':\n attr_value.func.name = functions[attr_value.func.name].name\n elif attr_value.WhichOneof('value') == 'list':\n for fn in attr_value.list.func:\n fn.name = functions[fn.name].name\n if node_def.op == 'HashTableV2':\n if 'use_node_name_sharing' not in node_def.attr or not node_def.attr['use_node_name_sharing'].b:\n node_def.attr['use_node_name_sharing'].b = True\n shared_name_suffix += '_{}'.format(ops.uid())\n op_def = op_def_registry.get(node_def.op)\n if op_def:\n attr = next((a for a in op_def.attr if a.name == 'shared_name'), None)\n if attr:\n shared_name = None\n if 'shared_name' in node_def.attr and node_def.attr['shared_name'].s:\n shared_name = node_def.attr['shared_name'].s\n elif attr.default_value.s:\n shared_name = compat.as_bytes(attr.default_value.s)\n if not shared_name:\n shared_name = compat.as_bytes(node_def.name)\n node_def.attr['shared_name'].s = shared_name + compat.as_bytes(shared_name_suffix)", + "docstring": "Replace functions calls and shared names in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py", + "ast_data": "FunctionDef name:fix_node_def arg:node_def arg:functions arg:shared_name_suffix arguments arg arg arg If Compare Assign For Call If Compare Call Assign If Compare Call For Assign If Compare If BoolOp Compare Assign Call Call Assign Call If Assign Call Compare If Assign If BoolOp Compare Assign If Assign Call If Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "_get_liveness", + "source_code": "@staticmethod\ndef _get_liveness(weakrefs: list[list[Optional[StorageWeakRefWrapper]]]) -> list[list[bool]]:\n if len(weakrefs) == 0:\n return []\n return [pytree.tree_map(is_live, outputs) for outputs in weakrefs]", + "docstring": "Maps weakrefs to true if the reference is alive and false otherwise", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:_get_liveness arg:weakrefs arguments arg If Compare Call Return return:no Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_get_counts", + "source_code": "def _get_counts(values, uniques):\n if values.dtype.kind in 'OU':\n counter = _NaNCounter(values)\n output = np.zeros(len(uniques), dtype=np.int64)\n for i, item in enumerate(uniques):\n with suppress(KeyError):\n output[i] = counter[item]\n return 
output\n unique_values, counts = _unique_np(values, return_counts=True)\n uniques_in_values = np.isin(uniques, unique_values, assume_unique=True)\n if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]):\n uniques_in_values[-1] = True\n unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values])\n output = np.zeros_like(uniques, dtype=np.int64)\n output[uniques_in_values] = counts[unique_valid_indices]\n return output", + "docstring": "Get the count of each of the in . The counts will use the order passed in by . For non-object dtypes, is assumed to be sorted and is at the end.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_encode.py", + "ast_data": "FunctionDef name:_get_counts arg:values arg:uniques arguments arg arg If Compare Assign Call Assign Call Call For Call With Call Assign Return return:yes Assign Call Assign Call If BoolOp Call Call Assign Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_filter_exception", + "source_code": "def _filter_exception(self, ex):\n if isinstance(ex, tuple):\n ex2 = ex[1]\n else:\n ex2 = ex\n if isinstance(ex2, self._clean_stop_exception_types):\n ex = None\n return ex", + "docstring": "Check if the exception indicated in 'ex' should be ignored. This method examines to check if it is an exception that should be reported to the users. If yes, it returns as is, otherwise it returns None. The code returns None for exception types listed in . Args: ex: None, an , or a Python tuple as returned by . Returns: ex or None.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py", + "ast_data": "FunctionDef name:_filter_exception arg:self arg:ex arguments arg arg If Call Assign Assign If Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "to_proto", + "source_code": "def to_proto(self) -> saver_pb2.SaverDef:\n filename_tensor = array_ops.placeholder(shape=[], dtype=dtypes.string, name='saver_filename')\n save_tensor = self._traced_save(filename_tensor)\n restore_op = self._traced_restore(filename_tensor).op\n return saver_pb2.SaverDef(filename_tensor_name=filename_tensor.name, save_tensor_name=save_tensor.name, restore_op_name=restore_op.name, version=saver_pb2.SaverDef.V2)", + "docstring": "Serializes to a SaverDef referencing the current graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\functional_saver.py", + "ast_data": "FunctionDef name:to_proto arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "release_lock", + "source_code": "def release_lock(self):\n self.locks[self.id].release()\n self.locked = False", + "docstring": "Release the lock on the currently-loaded session data.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:release_lock arg:self arguments arg Call Assign" + }, + { + "library": "pytorch", + "name": "replace_pattern_with_filters", + "source_code": "@compatibility(is_backward_compatible=False)\ndef replace_pattern_with_filters(gm: GraphModule, pattern: Union[Callable, Graph, GraphModule], replacement: Union[Callable, Graph, GraphModule, None]=None, match_filters: Optional[list[Callable[['InternalMatch', Graph, Graph], bool]]]=None, ignore_literals: bool=False, replacement_callback: Optional[Callable[['InternalMatch', Graph, Graph], Graph]]=None) -> list[ReplacedPatterns]:\n return _replace_pattern(gm, pattern, 
replacement, match_filters, ignore_literals, replacement_callback)", + "docstring": "See replace_pattern for documentation. This function is an overload with an additional match_filter argument. Args: ``: A function that takes in a match and returns a Graph to be used as the replacement. This allows you to construct a replacement graph based on the match.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\subgraph_rewriter.py", + "ast_data": "FunctionDef name:replace_pattern_with_filters arg:gm arg:pattern arg:replacement arg:match_filters arg:ignore_literals arg:replacement_callback arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_has_same_id_matched_objs", + "source_code": "def _has_same_id_matched_objs(frame: DynamoFrameType, cache_entry) -> bool:\n if not cache_entry:\n return False\n for local_name, weakref_from_cache_entry in cache_entry.guard_manager.id_matched_objs.items():\n if weakref_from_cache_entry() is not None:\n weakref_from_frame = _get_weakref_from_f_locals(frame, local_name)\n if weakref_from_frame is not weakref_from_cache_entry:\n return False\n return True", + "docstring": "Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones in frame.f_locals.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\cache_size.py", + "ast_data": "FunctionDef name:_has_same_id_matched_objs arg:frame arg:cache_entry arguments arg arg If Return return:yes For Call If Compare Call Assign Call If Compare Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "invres", + "source_code": "def invres(r, p, k, tol=0.001, rtype='avg'):\n r = np.atleast_1d(r)\n p = np.atleast_1d(p)\n k = np.trim_zeros(np.atleast_1d(k), 'f')\n unique_poles, multiplicity = _group_poles(p, tol, rtype)\n factors, denominator = _compute_factors(unique_poles, multiplicity, include_powers=True)\n if len(k) == 0:\n numerator = 0\n else:\n numerator = np.polymul(k, denominator)\n for residue, factor in zip(r, factors):\n numerator = np.polyadd(numerator, residue * factor)\n return (numerator, denominator)", + "docstring": "Compute b(s) and a(s) from partial fraction expansion. If is the degree of numerator and the degree of denominator :: b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] H(s) = ------ = ------------------------------------------ a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] then the partial-fraction expansion H(s) is defined as:: r[0] r[1] r[-1] = -------- + -------- + ... + --------- + k(s) (s-p[0]) (s-p[1]) (s-p[-1]) If there are any repeated roots (closer together than ), then H(s) has terms like:: r[i] r[i+1] r[i+n-1] -------- + ----------- + ... + ----------- (s-p[i]) (s-p[i])**2 (s-p[i])**n This function is used for polynomials in positive powers of s or z, such as analog filters or digital filters in controls engineering. For negative powers of z (typical for digital filters in DSP), use . Parameters ---------- r : array_like Residues corresponding to the poles. For repeated poles, the residues must be ordered to correspond to ascending by power fractions. p : array_like Poles. Equal poles must be adjacent. k : array_like Coefficients of the direct polynomial term. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. See for further details. rtype : {'avg', 'min', 'max'}, optional Method for computing a root to represent a group of identical roots. Default is 'avg'. See for further details. 
Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. See Also -------- residue, invresz, unique_roots", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_signaltools.py", + "ast_data": "FunctionDef name:invres arg:r arg:p arg:k arg:tol arg:rtype arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call If Compare Call Assign Assign Call For Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "_replace_columnwise", + "source_code": "def _replace_columnwise(self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex) -> Self | None:\n res = self if inplace else self.copy(deep=False)\n ax = self.columns\n for i, ax_value in enumerate(ax):\n if ax_value in mapping:\n ser = self.iloc[:, i]\n target, value = mapping[ax_value]\n newobj = ser.replace(target, value, regex=regex)\n res._iset_item(i, newobj, inplace=inplace)\n if inplace:\n return None\n return res.__finalize__(self)", + "docstring": "Dispatch to Series.replace column-wise. Parameters ---------- mapping : dict of the form {col: (target, value)} inplace : bool regex : bool or same types as in DataFrame.replace Returns ------- DataFrame or None", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_replace_columnwise arg:self arg:mapping arg:inplace arg:regex arguments arg arg arg arg Assign Call Assign For Call If Compare Assign Assign Assign Call Call If Return return:no Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "register_tf_serializable", + "source_code": "def register_tf_serializable(name=None, predicate=None):\n return register_serializable(package='tf', name=name, predicate=predicate)", + "docstring": "See the docstring for .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\registration\\__init__.py", + "ast_data": "FunctionDef name:register_tf_serializable arg:name arg:predicate arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "NoSampleWeightWrapper", + "source_code": "class NoSampleWeightWrapper(BaseEstimator):\n\n def __init__(self, est=None):\n self.est = est\n\n def fit(self, X, y):\n return self.est.fit(X, y)\n\n def predict(self, X):\n return self.est.predict(X)\n\n def predict_proba(self, X):\n return self.est.predict_proba(X)\n\n def __sklearn_tags__(self):\n tags = super().__sklearn_tags__()\n tags._skip_test = True\n return tags", + "docstring": "Wrap estimator which will not expose . 
Parameters ---------- est : estimator, default=None The estimator to wrap.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_mocking.py", + "ast_data": "ClassDef name:NoSampleWeightWrapper FunctionDef name:__init__ arg:self arg:est arguments arg arg Assign FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Return return:yes" + }, + { + "library": "numpy", + "name": "variable", + "source_code": "@property\ndef variable(self):\n return self._variable", + "docstring": "The name of the polynomial variable", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py", + "ast_data": "FunctionDef name:variable arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit_predict", + "source_code": "def fit_predict(self, X, y=None):\n return super().fit_predict(X, y)", + "docstring": "Perform spectral clustering on and return cluster labels. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) Training instances to cluster, similarities / affinities between instances if ``. y : Ignored Not used, present here for API consistency by convention. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_spectral.py", + "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "stack_inputs", + "source_code": "def stack_inputs(self, stack_indices=None, tile_variants=False):\n if stack_indices is None:\n stack_indices = range(len(self._inputs))\n length = self.pfor.loop_len_vector\n for i in stack_indices:\n inp = self._inputs[i]\n is_variant = inp.t.dtype == dtypes.variant\n if not inp.is_stacked:\n self._inputs[i] = _stack(inp.t, length)\n if tile_variants and is_variant:\n self._inputs[i] = wrap(_tile_variant_with_length(self._inputs[i].t, length), True)\n elif not tile_variants and is_variant:\n self._inputs[i] = wrap(_untile_variant(self._inputs[i].t), True)", + "docstring": "Stacks unstacked inputs at . Args: stack_indices: indices of inputs at which stacking is done. If None, stacking is done at all indices. tile_variants: If True, affected indices which have a variant dtype will be tiled after this operation to match the expected shape of a vectorized tensor. 
Variants generally need to be un-tiled when they are inputs to operations and tiled when returned.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:stack_inputs arg:self arg:stack_indices arg:tile_variants arguments arg arg arg If Compare Assign Call Call Assign For Assign Assign Compare If Assign Call If BoolOp Assign Call Call If BoolOp Assign Call Call" + }, + { + "library": "pytorch", + "name": "_RendezvousKeepAliveOp", + "source_code": "class _RendezvousKeepAliveOp:\n\n def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:\n if _should_keep_alive(ctx):\n if time.monotonic() > deadline:\n return _Action.ERROR_TIMEOUT\n return _Action.KEEP_ALIVE\n return _Action.FINISH", + "docstring": "Represent a rendezvous keep-alive update operation.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py", + "ast_data": "ClassDef name:_RendezvousKeepAliveOp FunctionDef name:__call__ arg:self arg:ctx arg:deadline arguments arg arg arg If Call If Compare Call Return return:yes Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "transform_path_affine", + "source_code": "def transform_path_affine(self, path):\n return self.get_affine().transform_path_affine(path)", + "docstring": "Apply the affine part of this transform to *path*, returning a new . ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:transform_path_affine arg:self arg:path arguments arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "union_with_duplicates", + "source_code": "def union_with_duplicates(lvals: ArrayLike | Index, rvals: ArrayLike | Index) -> ArrayLike | Index:\n from pandas import Series\n l_count = value_counts_internal(lvals, dropna=False)\n r_count = value_counts_internal(rvals, dropna=False)\n l_count, r_count = l_count.align(r_count, fill_value=0)\n final_count = np.maximum(l_count.values, r_count.values)\n final_count = Series(final_count, index=l_count.index, dtype='int', copy=False)\n if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex):\n unique_vals = lvals.append(rvals).unique()\n else:\n if isinstance(lvals, ABCIndex):\n lvals = lvals._values\n if isinstance(rvals, ABCIndex):\n rvals = rvals._values\n combined = concat_compat([lvals, rvals])\n unique_vals = unique(combined)\n unique_vals = ensure_wrapped_if_datetimelike(unique_vals)\n repeats = final_count.reindex(unique_vals).values\n return np.repeat(unique_vals, repeats)", + "docstring": "Extracts the union from lvals and rvals with respect to duplicates and nans in both arrays. Parameters ---------- lvals: np.ndarray or ExtensionArray left values which is ordered in front. rvals: np.ndarray or ExtensionArray right values ordered after lvals. Returns ------- np.ndarray or ExtensionArray Containing the unsorted union of both arrays. 
Notes ----- Caller is responsible for ensuring lvals.dtype == rvals.dtype.", + "type": "function", + "file_path": "pandas\\pandas\\core\\algorithms.py", + "ast_data": "FunctionDef name:union_with_duplicates arg:lvals arg:rvals arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call If BoolOp Call Call Assign Call Call If Call Assign If Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "set_laf_orientation", + "source_code": "def set_laf_orientation(LAF: Tensor, angles_degrees: Tensor) -> Tensor:\n KORNIA_CHECK_LAF(LAF)\n B, N = LAF.shape[:2]\n ori = get_laf_orientation(LAF).reshape_as(angles_degrees)\n return rotate_laf(LAF, angles_degrees - ori)", + "docstring": "Change the orientation of the LAFs. Args: LAF: :math: angles_degrees: :math: in degrees. Returns: LAF oriented with angles :math:", + "type": "function", + "file_path": "kornia\\kornia\\feature\\laf.py", + "ast_data": "FunctionDef name:set_laf_orientation arg:LAF arg:angles_degrees arguments arg arg Call Assign Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "body", + "source_code": "def body(i, *args):\n del args\n fn_result = fn(ctx, iterator.get_next())\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n with ops.control_dependencies([fn_result]):\n return [i + 1] + flat_last_step_outputs", + "docstring": "A wrapper around to create the while loop body.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py", + "ast_data": "FunctionDef name:body arg:i arguments arg arg Assign Call Call Assign Call With Call Return return:yes" + }, + { + "library": "numpy", + "name": "where", + "source_code": "def where(condition, x=_NoValue, y=_NoValue):\n missing = (x is _NoValue, y is _NoValue).count(True)\n if missing == 1:\n raise ValueError(\"Must provide both 'x' and 'y' or neither.\")\n if missing == 2:\n return nonzero(condition)\n cf = filled(condition, False)\n xd = getdata(x)\n yd = getdata(y)\n cm = getmaskarray(condition)\n xm = getmaskarray(x)\n ym = getmaskarray(y)\n if x is masked and y is not masked:\n xd = np.zeros((), dtype=yd.dtype)\n xm = np.ones((), dtype=ym.dtype)\n elif y is masked and x is not masked:\n yd = np.zeros((), dtype=xd.dtype)\n ym = np.ones((), dtype=xm.dtype)\n data = np.where(cf, xd, yd)\n mask = np.where(cf, xm, ym)\n mask = np.where(cm, np.ones((), dtype=mask.dtype), mask)\n mask = _shrink_mask(mask)\n return masked_array(data, mask=mask)", + "docstring": "Return a masked array with elements from or , depending on condition. .. note:: When only is provided, this function is identical to . The rest of this documentation covers only the case where all three arguments are provided. Parameters ---------- condition : array_like, bool Where True, yield , otherwise yield . x, y : array_like, optional Values from which to choose. , and need to be broadcastable to some shape. Returns ------- out : MaskedArray An masked array with elements where the condition is masked, elements from where is True, and elements from elsewhere. See Also -------- numpy.where : Equivalent function in the top-level NumPy module. nonzero : The function that is called when x and y are omitted Examples -------- >>> import numpy as np >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], ... [1, 0, 1], ... 
[0, 1, 0]]) >>> x masked_array( data=[[0.0, --, 2.0], [--, 4.0, --], [6.0, --, 8.0]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=1e+20) >>> np.ma.where(x > 5, x, -3.1416) masked_array( data=[[-3.1416, --, -3.1416], [--, -3.1416, --], [6.0, --, 8.0]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=1e+20)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:where arg:condition arg:x arg:y arguments arg arg arg Assign Call Compare Compare If Compare Raise Call If Compare Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "segment_mean", + "source_code": "@dispatch.dispatch_for_api(math_ops.unsorted_segment_mean)\ndef segment_mean(data: ragged_tensor.RaggedOrDense, segment_ids: ragged_tensor.RaggedOrDense, num_segments, name=None):\n with ops.name_scope(name, 'RaggedSegmentMean', [data, segment_ids, num_segments]):\n total = segment_sum(data, segment_ids, num_segments)\n ones = ragged_tensor.RaggedTensor.from_nested_row_splits(array_ops.ones_like(data.flat_values), data.nested_row_splits, validate=False)\n count = segment_sum(ones, segment_ids, num_segments)\n if ragged_tensor.is_ragged(total):\n return total.with_flat_values(total.flat_values / count.flat_values)\n else:\n return total / count", + "docstring": "For docs, see: _RAGGED_SEGMENT_DOCSTRING.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py", + "ast_data": "FunctionDef name:segment_mean arg:data arg:segment_ids arg:num_segments arg:name arguments arg arg arg arg With Call Assign Call Assign Call Call Assign Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_dense_tensor", + "source_code": "def get_dense_tensor(self, transformation_cache, state_manager):\n if isinstance(self.categorical_column, SequenceCategoricalColumn):\n raise ValueError('In embedding_column: {}. categorical_column must not be of type SequenceCategoricalColumn. Suggested fix A: If you wish to use DenseFeatures, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use SequenceFeatures instead of DenseFeatures. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n sparse_tensors = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n return self._get_dense_tensor_internal(sparse_tensors, state_manager)", + "docstring": "Returns tensor after doing the embedding lookup. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Embedding lookup tensor. 
Raises: ValueError: is SequenceCategoricalColumn.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg If Call Raise Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "iter_encode", + "source_code": "def iter_encode(self, obj):\n if obj.get('description', None):\n for row in obj['description'].split('\\n'):\n yield self._encode_comment(row)\n if not obj.get('relation'):\n raise BadObject('Relation name not found or with invalid value.')\n yield self._encode_relation(obj['relation'])\n yield ''\n if not obj.get('attributes'):\n raise BadObject('Attributes not found.')\n attribute_names = set()\n for attr in obj['attributes']:\n if not isinstance(attr, (tuple, list)) or len(attr) != 2 or (not isinstance(attr[0], str)):\n raise BadObject('Invalid attribute declaration \"%s\"' % str(attr))\n if isinstance(attr[1], str):\n if attr[1] not in _SIMPLE_TYPES:\n raise BadObject('Invalid attribute type \"%s\"' % str(attr))\n elif not isinstance(attr[1], (tuple, list)):\n raise BadObject('Invalid attribute type \"%s\"' % str(attr))\n if attr[0] in attribute_names:\n raise BadObject('Trying to use attribute name \"%s\" for the second time.' % str(attr[0]))\n else:\n attribute_names.add(attr[0])\n yield self._encode_attribute(attr[0], attr[1])\n yield ''\n attributes = obj['attributes']\n yield _TK_DATA\n if 'data' in obj:\n data = _get_data_object_for_encoding(obj.get('data'))\n yield from data.encode_data(obj.get('data'), attributes)\n yield ''", + "docstring": "The iterative version of . This encodes iteratively a given object and return, one-by-one, the lines of the ARFF file. :param obj: the object containing the ARFF information. :return: (yields) the ARFF file as strings.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\_arff.py", + "ast_data": "FunctionDef name:iter_encode arg:self arg:obj arguments arg arg If Call For Call Call If Call Raise Call Call If Call Raise Call Assign Call For If BoolOp Call Compare Call Call Raise Call Call If Call If Compare Raise Call Call If Call Raise Call Call If Compare Raise Call Call Call Call Assign If Compare Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "do_not_convert", + "source_code": "@tf_export('autograph.experimental.do_not_convert')\ndef do_not_convert(func=None):\n if func is None:\n return do_not_convert\n\n def wrapper(*args, **kwargs):\n with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):\n return func(*args, **kwargs)\n if inspect.isfunction(func) or inspect.ismethod(func):\n wrapper = functools.update_wrapper(wrapper, func)\n return autograph_artifact(wrapper)", + "docstring": "Decorator that suppresses the conversion of a function. Args: func: function to decorate. Returns: If is not None, returns a which is equivalent to , but is not converted by AutoGraph. 
If is None, returns a decorator that, when invoked with a single argument, returns a equivalent to the above case.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py", + "ast_data": "FunctionDef name:do_not_convert arg:func arguments arg If Compare Return return:yes FunctionDef name:wrapper arguments arg arg With Call Return return:yes Call If BoolOp Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "fromarrays", + "source_code": "@set_module('numpy.rec')\ndef fromarrays(arrayList, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None):\n arrayList = [sb.asarray(x) for x in arrayList]\n shape = _deprecate_shape_0_as_None(shape)\n if shape is None:\n shape = arrayList[0].shape\n elif isinstance(shape, int):\n shape = (shape,)\n if formats is None and dtype is None:\n formats = [obj.dtype for obj in arrayList]\n if dtype is not None:\n descr = sb.dtype(dtype)\n else:\n descr = format_parser(formats, names, titles, aligned, byteorder).dtype\n _names = descr.names\n if len(descr) != len(arrayList):\n raise ValueError('mismatch between the number of fields and the number of arrays')\n d0 = descr[0].shape\n nn = len(d0)\n if nn > 0:\n shape = shape[:-nn]\n _array = recarray(shape, descr)\n for k, obj in enumerate(arrayList):\n nn = descr[k].ndim\n testshape = obj.shape[:obj.ndim - nn]\n name = _names[k]\n if testshape != shape:\n raise ValueError(f'array-shape mismatch in array {k} (\"{name}\")')\n _array[name] = obj\n return _array", + "docstring": "Create a record array from a (flat) list of arrays Parameters ---------- arrayList : list or tuple List of array-like objects (such as lists, tuples, and ndarrays). dtype : data-type, optional valid dtype for all arrays shape : int or tuple of ints, optional Shape of the resulting array. If not provided, inferred from `dtypenumpy.rec.format_parser` to construct a dtype. See that function for detailed documentation. Returns ------- np.recarray Record array consisting of given arrayList columns. Examples -------- >>> x1=np.array([1,2,3,4]) >>> x2=np.array(['a','dd','xyz','12']) >>> x3=np.array([1.1,2,3,4]) >>> r = np.rec.fromarrays([x1,x2,x3],names='a,b,c') >>> print(r[1]) (2, 'dd', 2.0) # may vary >>> x1[1]=34 >>> r.a array([1, 2, 3, 4]) >>> x1 = np.array([1, 2, 3, 4]) >>> x2 = np.array(['a', 'dd', 'xyz', '12']) >>> x3 = np.array([1.1, 2, 3,4]) >>> r = np.rec.fromarrays( ... [x1, x2, x3], ... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)])) >>> r rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ), (4, b'12', 4. 
)], dtype=[('a', ' tuple[str, ...]:\n return self._supported_extensions", + "docstring": "Extensions that writer engine supports.", + "type": "method", + "file_path": "pandas\\pandas\\io\\excel\\_base.py", + "ast_data": "FunctionDef name:supported_extensions arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "implicit_replication", + "source_code": "@contextmanager\ndef implicit_replication() -> Iterator[None]:\n try:\n DTensor._op_dispatcher._allow_implicit_replication = True\n yield\n finally:\n DTensor._op_dispatcher._allow_implicit_replication = False", + "docstring": "This context manager allows :class: to implicitly treat all non-DTensors (`DTensor` s are not replicated in practice, please use it at your discretion.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\__init__.py", + "ast_data": "FunctionDef name:implicit_replication arguments Try Assign Assign" + }, + { + "library": "tensorflow", + "name": "close", + "source_code": "def close(self):\n raise NotImplementedError()", + "docstring": "Flushes and closes the summary writer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:close arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "transform_function", + "source_code": "def transform_function(self, fn, user_context):\n future_features = inspect_utils.getfutureimports(fn)\n node, source = parser.parse_entity(fn, future_features=future_features)\n logging.log(3, 'Source code of %s:\\n\\n%s\\n', fn, source)\n origin_info.resolve_entity(node, source, fn)\n namespace = inspect_utils.getnamespace(fn)\n namer = naming.Namer(namespace)\n new_name = namer.new_symbol(self.get_transformed_name(node), ())\n entity_info = transformer.EntityInfo(name=new_name, source_code=source, source_file='', future_features=future_features, namespace=namespace)\n context = transformer.Context(entity_info, namer, user_context)\n node = self._erase_arg_defaults(node)\n result = self.transform_ast(node, context)\n return (result, context)", + "docstring": "Transforms a function. Subclasses may override this method. The return value is opaque. The method receives the original AST. The result is passed as-is to the output of . Args: fn: A function or lambda. user_context: An opaque object (may be None) that is forwarded to transform_ast, through the ctx.user attribute. Returns: Tuple[Any, Any]. By default it returns the output of transform_ast, together with a containing information about the transformation process.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py", + "ast_data": "FunctionDef name:transform_function arg:self arg:fn arg:user_context arguments arg arg arg Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_check_multi_class", + "source_code": "def _check_multi_class(multi_class, solver, n_classes):\n if multi_class == 'auto':\n if solver in ('liblinear',):\n multi_class = 'ovr'\n elif n_classes > 2:\n multi_class = 'multinomial'\n else:\n multi_class = 'ovr'\n if multi_class == 'multinomial' and solver in ('liblinear',):\n raise ValueError('Solver %s does not support a multinomial backend.' % solver)\n return multi_class", + "docstring": "Computes the multi class type, either \"multinomial\" or \"ovr\". 
For > 2 and a solver that supports it, returns \"multinomial\". For all other cases, in particular binary classification, return \"ovr\".", + "type": "function", + "file_path": "scikit-learn\\sklearn\\linear_model\\_logistic.py", + "ast_data": "FunctionDef name:_check_multi_class arg:multi_class arg:solver arg:n_classes arguments arg arg arg If Compare If Compare Assign If Compare Assign Assign If BoolOp Compare Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "all_reduce_v2", + "source_code": "def all_reduce_v2(t, group_size, group_key, instance_key, merge_op='Add', final_op='Id', communication_hint='auto', timeout=0, ordering_token=None, max_subdivs_per_device=-1, name=None):\n if ordering_token is not None:\n ordering_token = [ordering_token]\n else:\n ordering_token = []\n return gen_collective_ops.collective_reduce_v2(t, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, communication_hint=communication_hint.lower(), timeout_seconds=timeout, is_stateless=False, ordering_token=ordering_token, max_subdivs_per_device=max_subdivs_per_device, name=name)", + "docstring": "Reduces tensors collectively, across devices. Args: t: the tensor to be reduced. group_size: an int32 tensor. The total number of tensors to be collectively reduced. Each must reside on a different device. Should be a positive integer. group_key: an int32 tensor identifying the group of devices. instance_key: an int32 tensor identifying the participating group of Ops. merge_op: string naming the binary Op to be applied to compute each partial reduction. final_op: string naming the unary Op to be applied to each fully reduced value. Can be 'Id' for no operation. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout: a float. If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. ordering_token: a resource tensor on the same device as the op to order the collectives in a per-device manner by auto control dependency. This argument can be omited when there is one collective Op per , or when explicit control dependency is used instead of auto control dependency. max_subdivs_per_device: int specifying the maximum number of subdivisions a tensor on a device can be divided into. The runtime uses this contraint to parallelize processing of each per-device tensor. Setting to -1 disables subdivision and reverts to previous behavior of not sub-dividing tensor. Setting to 0 uses sytem defaults. name: name of the Op. 
Returns: An Op implementing the distributed reduction.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py", + "ast_data": "FunctionDef name:all_reduce_v2 arg:t arg:group_size arg:group_key arg:instance_key arg:merge_op arg:final_op arg:communication_hint arg:timeout arg:ordering_token arg:max_subdivs_per_device arg:name arguments arg arg arg arg arg arg arg arg arg arg arg If Compare Assign Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "replace_math_functions", + "source_code": "def replace_math_functions(input_string):\n output_string = input_string\n for func in MATH_TRANSPILATIONS:\n output_string = output_string.replace(f'{func}(', f'{MATH_TRANSPILATIONS[func]}(')\n return output_string", + "docstring": "FIXME: Temporarily replace std:: invocations of math functions with non-std:: versions to prevent linker errors NOTE: This can lead to correctness issues when running tests, since the correct version of the math function (exp/expf) might not get called. Plan is to remove this function once HIP supports std:: math function calls inside device code", + "type": "function", + "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py", + "ast_data": "FunctionDef name:replace_math_functions arg:input_string arguments arg Assign For Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "RootDim", + "source_code": "@dataclasses.dataclass\nclass RootDim:\n min: int\n max: Union[int, None]\n derived: list[str]", + "docstring": "This represents a Dim object.", + "type": "class", + "file_path": "pytorch\\torch\\_export\\serde\\dynamic_shapes.py", + "ast_data": "ClassDef name:RootDim" + }, + { + "library": "pygame", + "name": "write", + "source_code": "def write(self, data):\n _check_init()\n self._check_open()\n self._output.Write(data)", + "docstring": "writes a list of midi data to the Output Output.write(data) writes series of MIDI information in the form of a list: write([[[status ],timestamp], [[status ],timestamp],...]) fields are optional example: choose program change 1 at time 20000 and send note 65 with velocity 100 500 ms later. write([[[0xc0,0,0],20000],[[0x90,60,100],20500]]) notes: 1. timestamps will be ignored if latency = 0. 2. To get a note to play immediately, send MIDI info with timestamp read from function Time. 3. understanding optional data fields: write([[[0xc0,0,0],20000]]) is equivalent to write([[[0xc0],20000]]) Can send up to 1024 elements in your data list, otherwise an IndexError exception is raised.", + "type": "method", + "file_path": "pygame\\src_py\\midi.py", + "ast_data": "FunctionDef name:write arg:self arg:data arguments arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "for_fetch", + "source_code": "@staticmethod\ndef for_fetch(fetch):\n if fetch is None:\n raise TypeError(f'Argument `fetch` = {fetch} has invalid type \"{type(fetch).__name__}\". 
Cannot be None')\n elif isinstance(fetch, (list, tuple)):\n return _ListFetchMapper(fetch)\n elif isinstance(fetch, collections_abc.Mapping):\n return _DictFetchMapper(fetch)\n elif _is_attrs_instance(fetch):\n return _AttrsFetchMapper(fetch)\n else:\n for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:\n if isinstance(fetch, tensor_type):\n fetches, contraction_fn = fetch_fn(fetch)\n return _ElementFetchMapper(fetches, contraction_fn)\n raise TypeError(f'Argument `fetch` = {fetch} has invalid type \"{type(fetch).__name__}\"')", + "docstring": "Creates fetch mapper that handles the structure of . The default graph must be the one from which we want to fetch values when this function is called. Args: fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple, or dict. Returns: An instance of a subclass of that handles the shape.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:for_fetch arg:fetch arguments arg If Compare Raise Call Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call For If Call Assign Call Return return:yes Call Raise Call Call" + }, + { + "library": "cherrypy", + "name": "_compat", + "source_code": "def _compat(self, config):\n for section, conf in config.items():\n if isinstance(conf, dict):\n for k in conf:\n if k in self.obsolete:\n warnings.warn('%r is obsolete. Use %r instead.\\nsection: [%s]' % (k, self.obsolete[k], section))\n elif k in self.deprecated:\n warnings.warn('%r is deprecated. Use %r instead.\\nsection: [%s]' % (k, self.deprecated[k], section))\n elif section in self.obsolete:\n warnings.warn('%r is obsolete. Use %r instead.' % (section, self.obsolete[section]))\n elif section in self.deprecated:\n warnings.warn('%r is deprecated. Use %r instead.' % (section, self.deprecated[section]))", + "docstring": "Process config and warn on each obsolete or deprecated entry.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpchecker.py", + "ast_data": "FunctionDef name:_compat arg:self arg:config arguments arg arg For Call If Call For If Compare Call If Compare Call If Compare Call If Compare Call" + }, + { + "library": "tensorflow", + "name": "get_input_shape_at", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef get_input_shape_at(self, node_index):\n return self._get_node_attribute_at_index(node_index, 'input_shapes', 'input shape')", + "docstring": "Retrieves the input shape(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple inputs). 
Raises: RuntimeError: If called in Eager mode.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:get_input_shape_at arg:self arg:node_index arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "segment_sqrt_n", + "source_code": "@dispatch.dispatch_for_api(math_ops.unsorted_segment_sqrt_n)\ndef segment_sqrt_n(data: ragged_tensor.RaggedOrDense, segment_ids: ragged_tensor.RaggedOrDense, num_segments, name=None):\n with ops.name_scope(name, 'RaggedSegmentSqrtN', [data, segment_ids, num_segments]):\n total = segment_sum(data, segment_ids, num_segments)\n ones = ragged_tensor.RaggedTensor.from_nested_row_splits(array_ops.ones_like(data.flat_values), data.nested_row_splits, validate=False)\n count = segment_sum(ones, segment_ids, num_segments)\n if ragged_tensor.is_ragged(total):\n return total.with_flat_values(total.flat_values / math_ops.sqrt(count.flat_values))\n else:\n return total / math_ops.sqrt(count)", + "docstring": "For docs, see: _RAGGED_SEGMENT_DOCSTRING.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py", + "ast_data": "FunctionDef name:segment_sqrt_n arg:data arg:segment_ids arg:num_segments arg:name arguments arg arg arg arg With Call Assign Call Assign Call Call Assign Call If Call Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_optimizer_step_code", + "source_code": "def _optimizer_step_code(self) -> None:\n pass", + "docstring": "Entry point for . When python tracing is enabled the profiler will hook into this function at the CPython level to inspect the optimizer's parameters and param groups. It is called it after since many optimizers lazily initialize state. This is a workaround due to lack of a proper step hook on the optimizer, and will be removed if it exists.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\optimizer.py", + "ast_data": "FunctionDef name:_optimizer_step_code arg:self arguments arg" + }, + { + "library": "matplotlib", + "name": "get_stretch", + "source_code": "def get_stretch(self):\n return self._stretch", + "docstring": "Return the font stretch or width. Options are: 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", + "ast_data": "FunctionDef name:get_stretch arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "_values_for_factorize", + "source_code": "def _values_for_factorize(self) -> tuple[np.ndarray, Any]:\n values = self._pa_array.to_numpy()\n return (values, self.dtype.na_value)", + "docstring": "Return an array and missing value suitable for factorization. 
Returns ------- values : ndarray na_value : pd.NA Notes ----- The values returned by this method are also used in :func:.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:_values_for_factorize arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "django", + "name": "CursorMixin", + "source_code": "class CursorMixin:\n\n def callproc(self, name, args=None):\n if not isinstance(name, sql.Identifier):\n name = sql.Identifier(name)\n qparts = [sql.SQL('SELECT * FROM '), name, sql.SQL('(')]\n if args:\n for item in args:\n qparts.append(sql.Literal(item))\n qparts.append(sql.SQL(','))\n del qparts[-1]\n qparts.append(sql.SQL(')'))\n stmt = sql.Composed(qparts)\n self.execute(stmt)\n return args", + "docstring": "A subclass of psycopg cursor implementing callproc.", + "type": "class", + "file_path": "django\\django\\db\\backends\\postgresql\\base.py", + "ast_data": "ClassDef name:CursorMixin FunctionDef name:callproc arg:self arg:name arg:args arguments arg arg arg If Call Assign Call Assign Call Call If For Call Call Call Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "transform_output_tensor", + "source_code": "def transform_output_tensor(self, output: Tensor, output_shape: Tuple[int, ...]) -> Tensor:\n return _transform_output_shape(output, output_shape) if self.keepdim else output", + "docstring": "Standardize output tensors.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\base.py", + "ast_data": "FunctionDef name:transform_output_tensor arg:self arg:output arg:output_shape arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "PrefetchBenchmark", + "source_code": "class PrefetchBenchmark(benchmark_base.DatasetBenchmarkBase):\n\n def benchmark_prefetch(self):\n num_elements = 1000000\n for prefetch_buffer in [1, 5, 10, 20, 100]:\n dataset = dataset_ops.Dataset.range(num_elements)\n dataset = dataset.prefetch(prefetch_buffer)\n self.run_and_report_benchmark(dataset, num_elements=num_elements, extras={'model_name': 'prefetch.benchmark.1', 'parameters': '%d' % prefetch_buffer}, name='prefetch_{}'.format(prefetch_buffer))", + "docstring": "Benchmarks for .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\prefetch_benchmark.py", + "ast_data": "ClassDef name:PrefetchBenchmark FunctionDef name:benchmark_prefetch arg:self arguments arg Assign For Assign Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "clamp_probs", + "source_code": "def clamp_probs(probs):\n eps = torch.finfo(probs.dtype).eps\n return probs.clamp(min=eps, max=1 - eps)", + "docstring": "Clamps the probabilities to be in the open interval . The probabilities would be clamped between and , and would be the smallest representable positive number for the input data type. Args: probs (Tensor): A tensor of probabilities. Returns: Tensor: The clamped probabilities. 
Examples: >>> probs = torch.tensor([0.0, 0.5, 1.0]) >>> clamp_probs(probs) tensor([1.1921e-07, 5.0000e-01, 1.0000e+00]) >>> probs = torch.tensor([0.0, 0.5, 1.0], dtype=torch.float64) >>> clamp_probs(probs) tensor([2.2204e-16, 5.0000e-01, 1.0000e+00], dtype=torch.float64)", + "type": "function", + "file_path": "pytorch\\torch\\distributions\\utils.py", + "ast_data": "FunctionDef name:clamp_probs arg:probs arguments arg Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "is_marray", + "source_code": "def is_marray(xp):\n return 'marray' in xp.__name__", + "docstring": "Returns True if is an MArray namespace; False otherwise.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_array_api.py", + "ast_data": "FunctionDef name:is_marray arg:xp arguments arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "compare_model_outputs", + "source_code": "def compare_model_outputs(float_model: nn.Module, q_model: nn.Module, *data, logger_cls=OutputLogger, allow_list=None) -> dict[str, dict[str, torch.Tensor]]:\n torch._C._log_api_usage_once('quantization_api._numeric_suite.compare_model_outputs')\n if allow_list is None:\n allow_list = get_default_compare_output_module_list()\n prepare_model_outputs(float_model, q_model, logger_cls, allow_list)\n float_model(*data)\n q_model(*data)\n act_compare_dict = get_matching_activations(float_model, q_model)\n return act_compare_dict", + "docstring": "Compare output activations between float and quantized models at corresponding locations for the same input. Return a dict with key corresponding to quantized module names and each entry being a dictionary with two keys 'float' and 'quantized', containing the activations of quantized model and float model at matching locations. This dict can be used to compare and compute the propagation quantization error. Example usage:: act_compare_dict = compare_model_outputs(float_model, qmodel, data) for key in act_compare_dict: print( key, compute_error( act_compare_dict[key]['float'], act_compare_dict[key]['quantized'].dequantize() ) ) Args: float_model: float model used to generate the q_model q_model: model quantized from float_model data: input data used to run the prepared float_model and q_model logger_cls: type of logger to be attached to float_module and q_module allow_list: list of module types to attach logger Return: act_compare_dict: dict with key corresponding to quantized module names and each entry being a dictionary with two keys 'float' and 'quantized', containing the matching float and quantized activations", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py", + "ast_data": "FunctionDef name:compare_model_outputs arg:float_model arg:q_model arguments arg arg arg arg arg Call If Compare Assign Call Call Call Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "set_global_backend", + "source_code": "def set_global_backend(backend, coerce=False, only=False, *, try_last=False):\n _uarray.set_global_backend(backend, coerce, only, try_last)", + "docstring": "This utility method replaces the default backend for permanent use. It will be tried in the list of backends automatically, unless the `set_backend`, the global backend is tried after registered backends. See Also -------- set_backend: A context manager that allows setting of backends. 
skip_backend: A context manager that allows skipping of backends.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py", + "ast_data": "FunctionDef name:set_global_backend arg:backend arg:coerce arg:only arguments arg arg arg arg Call" + }, + { + "library": "numpy", + "name": "atleast_1d", + "source_code": "@array_function_dispatch(_atleast_1d_dispatcher)\ndef atleast_1d(*arys):\n if len(arys) == 1:\n result = asanyarray(arys[0])\n if result.ndim == 0:\n result = result.reshape(1)\n return result\n res = []\n for ary in arys:\n result = asanyarray(ary)\n if result.ndim == 0:\n result = result.reshape(1)\n res.append(result)\n return tuple(res)", + "docstring": "Convert inputs to arrays with at least one dimension. Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. Parameters ---------- arys1, arys2, ... : array_like One or more input arrays. Returns ------- ret : ndarray An array, or tuple of arrays, each with ``. Copies are made only if necessary. See Also -------- atleast_2d, atleast_3d Examples -------- >>> import numpy as np >>> np.atleast_1d(1.0) array([1.]) >>> x = np.arange(9.0).reshape(3,3) >>> np.atleast_1d(x) array([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]) >>> np.atleast_1d(x) is x True >>> np.atleast_1d(1, [3, 4]) (array([1]), array([3, 4]))", + "type": "function", + "file_path": "numpy\\numpy\\_core\\shape_base.py", + "ast_data": "FunctionDef name:atleast_1d arguments arg If Compare Call Assign Call If Compare Assign Call Return return:yes Assign For Assign Call If Compare Assign Call Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "solve", + "source_code": "@array_function_dispatch(_solve_dispatcher)\ndef solve(a, b):\n a, _ = _makearray(a)\n _assert_stacked_square(a)\n b, wrap = _makearray(b)\n t, result_t = _commonType(a, b)\n if b.ndim == 1:\n gufunc = _umath_linalg.solve1\n else:\n gufunc = _umath_linalg.solve\n signature = 'DD->D' if isComplexType(t) else 'dd->d'\n with errstate(call=_raise_linalgerror_singular, invalid='call', over='ignore', divide='ignore', under='ignore'):\n r = gufunc(a, b, signature=signature)\n return wrap(r.astype(result_t, copy=False))", + "docstring": "Solve a linear matrix equation, or system of linear scalar equations. Computes the \"exact\" solution, , of the well-determined, i.e., full rank, linear matrix equation . Parameters ---------- a : (..., M, M) array_like Coefficient matrix. b : {(M,), (..., M, K)}, array_like Ordinate or \"dependent variable\" values. Returns ------- x : {(..., M,), (..., M, K)} ndarray Solution to the system a x = b. Returned shape is (..., M) if b is shape (M,) and (..., M, K) if b is (..., M, K), where the \"...\" part is broadcasted between a and b. Raises ------ LinAlgError If is singular or not square. See Also -------- scipy.linalg.solve : Similar function in SciPy. Notes ----- Broadcasting rules apply, see the documentation for details. 
The solutions are computed using LAPACK routine `alstsq`: >>> import numpy as np >>> a = np.array([[1, 2], [3, 5]]) >>> b = np.array([1, 2]) >>> x = np.linalg.solve(a, b) >>> x array([-1., 1.]) Check that the solution is correct: >>> np.allclose(np.dot(a, x), b) True", + "type": "function", + "file_path": "numpy\\numpy\\linalg\\_linalg.py", + "ast_data": "FunctionDef name:solve arg:a arg:b arguments arg arg Assign Call Call Assign Call Assign Call If Compare Assign Assign Assign Call With Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "authlib", + "name": "validate_nonce", + "source_code": "def validate_nonce(self):\n nonce_value = self.params.get('nonce')\n if nonce_value:\n if 'nonce' not in self:\n raise MissingClaimError('nonce')\n if nonce_value != self['nonce']:\n raise InvalidClaimError('nonce')", + "docstring": "String value used to associate a Client session with an ID Token, and to mitigate replay attacks. The value is passed through unmodified from the Authentication Request to the ID Token. If present in the ID Token, Clients MUST verify that the nonce Claim Value is equal to the value of the nonce parameter sent in the Authentication Request. If present in the Authentication Request, Authorization Servers MUST include a nonce Claim in the ID Token with the Claim Value being the nonce value sent in the Authentication Request. Authorization Servers SHOULD perform no other processing on nonce values used. The nonce value is a case sensitive string.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\claims.py", + "ast_data": "FunctionDef name:validate_nonce arg:self arguments arg Assign Call If If Compare Raise Call If Compare Raise Call" + }, + { + "library": "pytorch", + "name": "_load_dispatch_table", + "source_code": "@classmethod\ndef _load_dispatch_table(cls, custom_dispatch_table=None) -> None:\n if getattr(cls, 'SPARSE_DISPATCH', None) is None:\n cls.SPARSE_DISPATCH = {torch.ops.aten.values: semi_sparse_values, torch.ops.aten.indices: semi_sparse_indices, torch.ops.aten.is_same_size: fallback_dispatcher, torch.ops.aten.detach_: fallback_dispatcher, torch.ops.aten.detach: semi_sparse_detach, torch.ops.aten.t: semi_sparse_t, torch.ops.aten.view: semi_sparse_view, torch.ops.aten.mm: semi_sparse_mm, torch.ops.aten.matmul: semi_sparse_mm, torch.ops.aten.addmm: semi_sparse_addmm, torch.ops.aten.linear: semi_sparse_linear, torch.ops.aten._to_copy: fallback_dispatcher, torch.ops.aten._scaled_mm: semi_sparse_scaled_mm}\n if custom_dispatch_table is not None:\n cls.SPARSE_DISPATCH.update(custom_dispatch_table)", + "docstring": "Loads the op overload sparse dispatch table for the current class.", + "type": "method", + "file_path": "pytorch\\torch\\sparse\\semi_structured.py", + "ast_data": "FunctionDef name:_load_dispatch_table arg:cls arg:custom_dispatch_table arguments arg arg If Compare Call Assign If Compare Call" + }, + { + "library": "pytorch", + "name": "serialize", + "source_code": "def serialize(self) -> _WireProtocolPickledInput:\n from torch.fx._graph_pickler import GraphPickler\n return _WireProtocolPickledInput(GraphPickler.dumps(self))", + "docstring": "Turns this object into a _WireProtocolPickledInput which can be directly transferred across a stream.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py", + "ast_data": "FunctionDef name:serialize arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "delete", + "source_code": "def delete(self, url, 
**kwargs):\n return self.request('DELETE', url, **kwargs)", + "docstring": "Invoke DELETE http request. If `` configured, shortcut is available:: client.delete(\"posts/123\")", + "type": "method", + "file_path": "authlib\\authlib\\integrations\\base_client\\sync_app.py", + "ast_data": "FunctionDef name:delete arg:self arg:url arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "BatchableExtensionType", + "source_code": "@tf_export('experimental.BatchableExtensionType')\nclass BatchableExtensionType(ExtensionType):\n _tf_extension_type_do_not_transform_this_class = True", + "docstring": "An ExtensionType that can be batched and unbatched. s can be used with APIs that require batching or unbatching, including , , and . E.g.: >>> class Vehicle(tf.experimental.BatchableExtensionType): ... top_speed: tf.Tensor ... mpg: tf.Tensor >>> batch = Vehicle([120, 150, 80], [30, 40, 12]) >>> tf.map_fn(lambda vehicle: vehicle.top_speed * vehicle.mpg, batch, ... fn_output_signature=tf.int32).numpy() array([3600, 6000, 960], dtype=int32) An is used by these APIs to encode values. The default encoder assumes that values can be stacked, unstacked, or concatenated by simply stacking, unstacking, or concatenating every nested , , , or field. Extension types where this is not the case will need to override with a custom . See for more details.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", + "ast_data": "ClassDef name:BatchableExtensionType Assign Call" + }, + { + "library": "scipy", + "name": "gauss_spline", + "source_code": "def gauss_spline(x, n):\n xp = array_namespace(x)\n x = xp.asarray(x)\n signsq = (n + 1) / 12.0\n return 1 / math.sqrt(2 * math.pi * signsq) * xp.exp(-x ** 2 / 2 / signsq)", + "docstring": "Gaussian approximation to B-spline basis function of order n. Parameters ---------- x : array_like a knot vector n : int The order of the spline. Must be non-negative, i.e., n >= 0 Returns ------- res : ndarray B-spline basis function values approximated by a zero-mean Gaussian function. Notes ----- The B-spline basis function can be approximated well by a zero-mean Gaussian function with standard-deviation equal to :math: for large : .. math:: \\frac{1}{\\sqrt {2\\pi\\sigma^2}}exp(-\\frac{x^2}{2\\sigma}) References ---------- .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In: Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer Science, vol 4485. Springer, Berlin, Heidelberg .. 
[2] Examples -------- We can calculate B-Spline basis functions approximated by a gaussian distribution: >>> import numpy as np >>> from scipy.signal import gauss_spline >>> knots = np.array([-1.0, 0.0, -1.0]) >>> gauss_spline(knots, 3) array([0.15418033, 0.6909883, 0.15418033]) # may vary", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_spline_filters.py", + "ast_data": "FunctionDef name:gauss_spline arg:x arg:n arguments arg arg Assign Call Assign Call Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_min_matrix_dim_tensor", + "source_code": "def _min_matrix_dim_tensor(self):\n return math_ops.reduce_min(self.shape_tensor()[-2:])", + "docstring": "Minimum of domain/range dimension, as a tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py", + "ast_data": "FunctionDef name:_min_matrix_dim_tensor arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "forward_index", + "source_code": "@property\ndef forward_index(self):\n return self._forward_index", + "docstring": "The loop index of forward loop.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", + "ast_data": "FunctionDef name:forward_index arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "prev", + "source_code": "@property\ndef prev(self) -> 'Node':\n return self._prev", + "docstring": "Returns the previous `` in the linked list of Nodes.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\node.py", + "ast_data": "FunctionDef name:prev arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "get", + "source_code": "def get(self, key: str):\n with patch_pickle():\n group = self.get_node(key)\n if group is None:\n raise KeyError(f'No object named {key} in the file')\n return self._read_group(group)", + "docstring": "Retrieve pandas object stored in file. Parameters ---------- key : str Object to retrieve from file. Raises KeyError if not found. Returns ------- object Same type as object stored in file. See Also -------- HDFStore.get_node : Returns the node with the key. HDFStore.get_storer : Returns the storer object for a key. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=[\"A\", \"B\"]) >>> store = pd.HDFStore(\"store.h5\", \"w\") # doctest: +SKIP >>> store.put(\"data\", df) # doctest: +SKIP >>> store.get(\"data\") # doctest: +SKIP >>> store.close() # doctest: +SKIP", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:get arg:self arg:key arguments arg arg With Call Assign Call If Compare Raise Call Return return:yes Call" + }, + { + "library": "django", + "name": "_do_update", + "source_code": "def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):\n filtered = base_qs.filter(pk=pk_val)\n if not values:\n return update_fields is not None or filtered.exists()\n if self._meta.select_on_save and (not forced_update):\n return filtered.exists() and (filtered._update(values) > 0 or filtered.exists())\n return filtered._update(values) > 0", + "docstring": "Try to update the model. 
Return True if the model was updated (if an update query was done and a matching row was found in the DB).", + "type": "method", + "file_path": "django\\django\\db\\models\\base.py", + "ast_data": "FunctionDef name:_do_update arg:self arg:base_qs arg:using arg:pk_val arg:values arg:update_fields arg:forced_update arguments arg arg arg arg arg arg arg Assign Call If Return return:yes BoolOp Compare Call If BoolOp Return return:yes BoolOp Call BoolOp Compare Call Call Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "get_sequence_dense_tensor", + "source_code": "def get_sequence_dense_tensor(self, transformation_cache, state_manager):\n if not isinstance(self.categorical_column, SequenceCategoricalColumn):\n raise ValueError('In embedding_column: {}. categorical_column must be of type SequenceCategoricalColumn to use SequenceFeatures. Suggested fix: Use one of sequence_categorical_column_with_*. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n dense_tensor = self._get_dense_tensor_internal(transformation_cache, state_manager)\n sparse_tensors = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n sequence_length = fc_utils.sequence_length_from_sparse_tensor(sparse_tensors.id_tensor)\n return SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=sequence_length)", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_sequence_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg If Call Raise Call Call Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "view_limits", + "source_code": "def view_limits(self, vmin, vmax):\n return mtransforms.nonsingular(vmin, vmax)", + "docstring": "Select a scale for the range from vmin to vmax. Subclasses should override this method to change locator behaviour.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:view_limits arg:self arg:vmin arg:vmax arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "do_encode", + "source_code": "def do_encode(self, type_spec_value, encode_fn):\n type_state = type_spec_value._serialize()\n num_flat_components = len(nest.flatten(type_spec_value._component_specs, expand_composites=True))\n encoded_type_spec = struct_pb2.StructuredValue()\n encoded_type_spec.type_spec_value.CopyFrom(struct_pb2.TypeSpecProto(type_spec_class=self.type_spec_proto_enum, type_state=encode_fn(type_state), type_spec_class_name=self.type_spec_class.__name__, num_flat_components=num_flat_components))\n return encoded_type_spec", + "docstring": "Returns an encoded proto for the given built-in TypeSpec.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py", + "ast_data": "FunctionDef name:do_encode arg:self arg:type_spec_value arg:encode_fn arguments arg arg arg Assign Call Assign Call Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "on_run_start", + "source_code": "@abc.abstractmethod\ndef on_run_start(self, request):\n pass", + "docstring": "Callback invoked on run() calls to the debug-wrapper session. This is a blocking callback. 
The invocation happens after the wrapper's run() call is entered, after an increment of run call counter. Args: request: () callback request object carrying information about the run call such as the fetches, feed dict, run options, run metadata, and how many calls to this wrapper session have occurred. Returns: An instance of , carrying information to debug URLs used to watch the tensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "FunctionDef name:on_run_start arg:self arg:request arguments arg arg" + }, + { + "library": "numpy", + "name": "_zseries_to_cseries", + "source_code": "def _zseries_to_cseries(zs):\n n = (zs.size + 1) // 2\n c = zs[n - 1:].copy()\n c[1:n] *= 2\n return c", + "docstring": "Convert z-series to a Chebyshev series. Convert a z series to the equivalent Chebyshev series. The result is never an empty array. The dtype of the return is the same as that of the input. No checks are run on the arguments as this routine is for internal use. Parameters ---------- zs : 1-D ndarray Odd length symmetric z-series, ordered from low to high. Returns ------- c : 1-D ndarray Chebyshev coefficients, ordered from low to high.", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", + "ast_data": "FunctionDef name:_zseries_to_cseries arg:zs arguments arg Assign Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "start_event_loop", + "source_code": "def start_event_loop(self, timeout=0):\n if timeout <= 0:\n timeout = np.inf\n timestep = 0.01\n counter = 0\n self._looping = True\n while self._looping and counter * timestep < timeout:\n self.flush_events()\n time.sleep(timestep)\n counter += 1", + "docstring": "Start a blocking event loop. Such an event loop is used by interactive functions, such as and , to wait for events. The event loop blocks until a callback function triggers , or *timeout* is reached. If *timeout* is 0 or negative, never timeout. Only interactive backends need to reimplement this method and it relies on being properly implemented. Interactive backends should implement this in a more native way.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:start_event_loop arg:self arg:timeout arguments arg arg If Compare Assign Assign Assign Assign While BoolOp Compare Call Call" + }, + { + "library": "numpy", + "name": "bincount", + "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)\ndef bincount(x, weights=None, minlength=None):\n return (x, weights)", + "docstring": "bincount(x, /, weights=None, minlength=0) Count number of occurrences of each value in array of non-negative ints. The number of bins (of size 1) is one larger than the largest value in . If is specified, there will be at least this number of bins in the output array (though it will be longer if necessary, depending on the contents of ). Each bin gives the number of occurrences of its index value in . If is specified the input array is weighted by it, i.e. if a value `xoutminlength` keyword. 
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights >>> x = np.array([0, 1, 1, 2, 2, 2]) >>> np.bincount(x, weights=w) array([ 0.3, 0.7, 1.1])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\multiarray.py", + "ast_data": "FunctionDef name:bincount arg:x arg:weights arg:minlength arguments arg arg arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "validate_claims_parameter_supported", + "source_code": "def validate_claims_parameter_supported(self):\n _validate_boolean_value(self, 'claims_parameter_supported')", + "docstring": "OPTIONAL. Boolean value specifying whether the OP supports use of the claims parameter, with true indicating support. If omitted, the default value is false.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", + "ast_data": "FunctionDef name:validate_claims_parameter_supported arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "to_map", + "source_code": "def to_map(val_or_map: Union[Std, dict[int, Std]], local_world_size: int) -> dict[int, Std]:\n if isinstance(val_or_map, Std):\n return dict.fromkeys(range(local_world_size), val_or_map)\n else:\n map = {}\n for i in range(local_world_size):\n map[i] = val_or_map.get(i, Std.NONE)\n return map", + "docstring": "Certain APIs take redirect settings either as a single value (e.g. apply to all local ranks) or as an explicit user-provided mapping. This method is a convenience method that converts a value or mapping into a mapping. Example: :: to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT} to_map( {0: Std.OUT, 1: Std.OUT}, local_world_size=2 ) # returns: {0: Std.OUT, 1: Std.OUT}", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py", + "ast_data": "FunctionDef name:to_map arg:val_or_map arg:local_world_size arguments arg arg If Call Return return:yes Call Call Assign For Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "scatter", + "source_code": "def scatter(tensors, src=0, group=group.WORLD):\n return _Scatter.apply(src, group, *tensors)", + "docstring": "Scatters a list of tensors to all processes in a group. Each process will receive exactly one tensor and store its data in the `. src (int, optional): Source rank (default is 0). group (ProcessGroup, optional): The process group to work on. Returns: Tensor: Output tensor from the scatter operation.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\nn\\functional.py", + "ast_data": "FunctionDef name:scatter arg:tensors arg:src arg:group arguments arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "check_str_arg", + "source_code": "def check_str_arg(result, func, cargs):\n dbl = result\n ptr = cargs[-1]._obj\n return (dbl, ptr.value.decode())", + "docstring": "This is for the OSRGet[Angular|Linear]Units functions, which require that the returned string pointer not be freed. 
This returns both the double and string values.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py", + "ast_data": "FunctionDef name:check_str_arg arg:result arg:func arg:cargs arguments arg arg arg Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_reciprocal_flops", + "source_code": "@ops.RegisterStatistics('Reciprocal', 'flops')\ndef _reciprocal_flops(graph, node):\n return _unary_op_flops(graph, node)", + "docstring": "Compute flops for Reciprocal operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_reciprocal_flops arg:graph arg:node arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "Parameter", + "source_code": "@dataclasses.dataclass(frozen=True)\nclass Parameter:\n name: str\n type_constraint: TypeConstraintParam\n required: bool\n variadic: bool\n default: Any = _EMPTY_DEFAULT\n\n def __str__(self) -> str:\n type_str = self.type_constraint.name\n if self.has_default():\n return f'{self.name}: {type_str} = {self.default}'\n return f'{self.name}: {type_str}'\n\n def has_default(self) -> bool:\n return self.default is not _EMPTY_DEFAULT", + "docstring": "A formal parameter of an operator.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py", + "ast_data": "ClassDef name:Parameter FunctionDef name:__str__ arg:self arguments arg Assign If Call Return return:yes Return return:yes FunctionDef name:has_default arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "pandas", + "name": "_get_getitem_freq", + "source_code": "def _get_getitem_freq(self, key) -> BaseOffset | None:\n is_period = isinstance(self.dtype, PeriodDtype)\n if is_period:\n freq = self.freq\n elif self.ndim != 1:\n freq = None\n else:\n key = check_array_indexer(self, key)\n freq = None\n if isinstance(key, slice):\n if self.freq is not None and key.step is not None:\n freq = key.step * self.freq\n else:\n freq = self.freq\n elif key is Ellipsis:\n freq = self.freq\n elif com.is_bool_indexer(key):\n new_key = lib.maybe_booleans_to_slice(key.view(np.uint8))\n if isinstance(new_key, slice):\n return self._get_getitem_freq(new_key)\n return freq", + "docstring": "Find the attribute to assign to the result of a __getitem__ lookup.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_get_getitem_freq arg:self arg:key arguments arg arg Assign Call If Assign If Compare Assign Assign Call Assign If Call If BoolOp Compare Compare Assign Assign If Compare Assign If Call Assign Call Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_num_buckets", + "source_code": "@property\ndef _num_buckets(self):\n return self.hash_bucket_size", + "docstring": "Returns number of buckets in this sparse feature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_num_buckets arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_verify_same_dense_param_type", + "source_code": "def _verify_same_dense_param_type(self) -> None:\n typename = torch.typename(self._all_params[0])\n if self._all_params[0].is_sparse:\n raise ValueError(f'ZeroRedundancyOptimizer only supports using the same dense type for all parameters but got 
{typename}')\n for param in self._all_params[1:]:\n other_typename = torch.typename(param)\n if other_typename != typename:\n raise ValueError(f'ZeroRedundancyOptimizer only supports using the same dense type for all parameters but got both {typename} and {other_typename}')", + "docstring": "Verify that all parameters are of the same dense type. The method assumes that `` contains sparse parameters or parameters of varying dense types. NOTE: This method can be removed once support for sparse parameters and varying parameter types is added.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py", + "ast_data": "FunctionDef name:_verify_same_dense_param_type arg:self arguments arg Assign Call If Raise Call For Assign Call If Compare Raise Call" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, path=None):\n if path is None:\n request = cherrypy.serving.request\n path = request.script_name + request.path_info\n self.args = (path,)\n HTTPError.__init__(self, 404, \"The path '%s' was not found.\" % path)", + "docstring": "Initialize an HTTP Not Found error.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cperror.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:path arguments arg arg If Compare Assign Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "QueueClosedError", + "source_code": "class QueueClosedError(Exception):\n pass", + "docstring": "Raised when CloseableQueue.put() fails because the queue is closed.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py", + "ast_data": "ClassDef name:QueueClosedError" + }, + { + "library": "pytorch", + "name": "from_str", + "source_code": "@staticmethod\ndef from_str(action_string: str):\n action_string = action_string.strip()\n if (match := _action_regex.match(action_string)):\n stage_index, computation_type, microbatch_index = match.groups()\n return _Action(int(stage_index), _ComputationType.from_str(computation_type), int(microbatch_index) if len(microbatch_index) else None)\n elif action_string == '':\n return None\n raise RuntimeError(f'Invalid action string: {action_string}, should be formatted as [stage][action type][(microbatch)] e.g. 2F0')", + "docstring": "Reverse of __repr__ String should be formatted as [stage][action type][(microbatch)] e.g. , ,", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py", + "ast_data": "FunctionDef name:from_str arg:action_string arguments arg Assign Call If Call Assign Call Return return:yes Call Call Call Call Call If Compare Return return:no Raise Call" + }, + { + "library": "django", + "name": "deconstruct", + "source_code": "def deconstruct(self):\n qs_class = self._queryset_class\n if getattr(self, '_built_with_as_manager', False):\n return (True, None, '%s.%s' % (qs_class.__module__, qs_class.__name__), None, None)\n else:\n module_name = self.__module__\n name = self.__class__.__name__\n module = import_module(module_name)\n if not hasattr(module, name):\n raise ValueError(\"Could not find manager %s in %s.\\nPlease note that you need to inherit from managers you dynamically generated with 'from_queryset()'.\" % (name, module_name))\n return (False, '%s.%s' % (module_name, name), None, self._constructor_args[0], self._constructor_args[1])", + "docstring": "Return a 5-tuple of the form (as_manager (True), manager_class, queryset_class, args, kwargs). 
Raise a ValueError if the manager is dynamically generated.", + "type": "method", + "file_path": "django\\django\\db\\models\\manager.py", + "ast_data": "FunctionDef name:deconstruct arg:self arguments arg Assign If Call Return return:yes Assign Assign Assign Call If Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "wrap_layer_objects", + "source_code": "def wrap_layer_objects(layer, serialization_cache):\n all_losses = layer._callable_losses[:]\n for child_layer in utils.list_all_layers(layer):\n all_losses.extend(child_layer._callable_losses)\n keras_loss_cache = serialization_cache.setdefault('keras_losses', {})\n wrapped_loss_functions = []\n for loss_fn in all_losses:\n if loss_fn in keras_loss_cache:\n wrapped_loss_functions.append(keras_loss_cache[loss_fn])\n else:\n wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))\n keras_loss_cache[loss_fn] = wrapped_loss\n wrapped_loss_functions.append(wrapped_loss)\n wrapped_layer_losses = [keras_loss_cache[fn] for fn in layer._callable_losses[:]]\n layer_metrics = data_structures.wrap_or_unwrap({m.name: m for m in layer._metrics})\n return dict(variables=data_structures.wrap_or_unwrap(layer.variables), trainable_variables=data_structures.wrap_or_unwrap(layer.trainable_variables), non_trainable_variables=data_structures.wrap_or_unwrap(layer.non_trainable_variables), layers=data_structures.wrap_or_unwrap(utils.list_all_layers(layer)), metrics=data_structures.wrap_or_unwrap(layer.metrics), regularization_losses=data_structures.wrap_or_unwrap(wrapped_loss_functions), layer_regularization_losses=data_structures.wrap_or_unwrap(wrapped_layer_losses), layer_metrics=layer_metrics)", + "docstring": "Returns extra trackable objects to attach to the serialized layer. Args: layer: Keras Layer object. serialization_cache: Dictionary shared between all objects during serialization. Returns: A dictionary containing all checkpointable objects from a SerializedAttributes object. 
See LayerAttributes and ModelAttributes for entire list of objects", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", + "ast_data": "FunctionDef name:wrap_layer_objects arg:layer arg:serialization_cache arguments arg arg Assign For Call Call Assign Call Assign For If Compare Call Assign Call Call Assign Call Assign Assign Call Return return:yes Call Call Call Call Call Call Call Call Call" + }, + { + "library": "scipy", + "name": "jac", + "source_code": "@property\ndef jac(self):\n if self._g is None:\n self._g = self._jac(self._x)\n return self._g", + "docstring": "Value of Jacobian of objective function at current iteration.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_trustregion.py", + "ast_data": "FunctionDef name:jac arg:self arguments arg If Compare Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "restore", + "source_code": "def restore(self):\n pass", + "docstring": "Restore the graphics context from the stack - needed only for backends that save graphics contexts on a stack.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:restore arg:self arguments arg" + }, + { + "library": "cryptography", + "name": "__copy__", + "source_code": "@abc.abstractmethod\ndef __copy__(self) -> RSAPrivateKey:\n pass", + "docstring": "Returns a copy.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py", + "ast_data": "FunctionDef name:__copy__ arg:self arguments arg" + }, + { + "library": "django", + "name": "translation_file_changed", + "source_code": "def translation_file_changed(sender, file_path, **kwargs):\n if file_path.suffix == '.mo':\n import gettext\n from django.utils.translation import trans_real\n gettext._translations = {}\n trans_real._translations = {}\n trans_real._default = None\n trans_real._active = Local()\n return True", + "docstring": "Clear the internal translations cache if a .mo file is modified.", + "type": "function", + "file_path": "django\\django\\utils\\translation\\reloader.py", + "ast_data": "FunctionDef name:translation_file_changed arg:sender arg:file_path arguments arg arg arg If Compare Assign Assign Assign Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "data", + "source_code": "@property\ndef data(self):\n return self._data.value", + "docstring": "A pointer to the memory area of the array as a Python integer. This memory area may contain data that is not aligned, or not in correct byte-order. The memory area may not even be writeable. The array flags and data-type of this array should be respected when passing this attribute to arbitrary C-code to avoid trouble that can include Python crashing. User Beware! The value of this attribute is exactly the same as: ``", + "type": "method", + "file_path": "numpy\\numpy\\_core\\_internal.py", + "ast_data": "FunctionDef name:data arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "histogram_summary", + "source_code": "@deprecated('2016-11-30', 'Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. 
This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in.')\ndef histogram_summary(tag, values, collections=None, name=None):\n with ops.name_scope(name, 'HistogramSummary', [tag, values]) as scope:\n val = gen_logging_ops.histogram_summary(tag=tag, values=values, name=scope)\n _Collect(val, collections, [ops.GraphKeys.SUMMARIES])\n return val", + "docstring": "Outputs a protocol buffer with a histogram. This ops is deprecated. Please switch to tf.summary.histogram. For an explanation of why this op was deprecated, and information on how to migrate, look ['here']( The generated []( has one summary value containing a histogram for . This op reports an error if any value is not finite. Args: tag: A . 0-D. Tag to use for the summary value. values: A real numeric . Any shape. Values to use to build the histogram. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to . name: A name for the operation (optional). Returns: A scalar of type . The serialized protocol buffer.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py", + "ast_data": "FunctionDef name:histogram_summary arg:tag arg:values arg:collections arg:name arguments arg arg arg arg With Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_equalization_qconfig_dict", + "source_code": "def get_equalization_qconfig_dict(layer_sqnr_dict: dict[str, float], num_layers_to_equalize: int) -> Any:\n layer_sqnr_sorted = sorted(layer_sqnr_dict.items(), key=operator.itemgetter(1))\n layers_to_equalize = layer_sqnr_sorted[:num_layers_to_equalize]\n module_to_qconfig_list = [(item[0], default_equalization_qconfig) for item in layers_to_equalize]\n equalization_qconfig_dict = {'module_name': module_to_qconfig_list}\n return equalization_qconfig_dict", + "docstring": "Given the layer to SQNR dictionary, find the layers with the highest quantization errors, and return an equalization_qconfig_dict specifying to only equalize those top layers. 
Args: layer_sqnr_dict: Dictionary mapping layer names to SQNR values (found when comparing an equalized model against a float model) num_layers_to_equalize: Number of layers with the highest quantization errors to equalize", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:get_equalization_qconfig_dict arg:layer_sqnr_dict arg:num_layers_to_equalize arguments arg arg Assign Call Call Call Assign Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "autoscale_None", + "source_code": "def autoscale_None(self):\n self._colorizer.autoscale_None(self._A)", + "docstring": "Autoscale the scalar limits on the norm instance using the current array, changing only limits that are None", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py", + "ast_data": "FunctionDef name:autoscale_None arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "while_body", + "source_code": "def while_body(i, *ta_list):\n fn_conv = autograph.tf_convert(loop_fn, autograph_ctx.control_status_ctx())\n fn_output = nest.flatten(fn_conv(i))\n if len(fn_output) != len(flat_loop_fn_dtypes):\n raise ValueError(f'Number of expected outputs {len(flat_loop_fn_dtypes)}, does not match the number of actual outputs {len(fn_output)} from loop_fn: {loop_fn} with output {fn_output}.')\n outputs = []\n del is_none_list[:]\n is_none_list.extend((x is None for x in fn_output))\n for out, ta in zip(fn_output, ta_list):\n if out is not None:\n ta = ta.write(i, out)\n outputs.append(ta)\n return tuple([i + 1] + outputs)", + "docstring": "Body of while loop.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py", + "ast_data": "FunctionDef name:while_body arg:i arguments arg arg Assign Call Call Assign Call Call If Compare Call Call Raise Call Call Call Assign Call Compare For Call If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "orthogonal_procrustes", + "source_code": "@_apply_over_batch(('A', 2), ('B', 2))\ndef orthogonal_procrustes(A, B, check_finite=True):\n if check_finite:\n A = np.asarray_chkfinite(A)\n B = np.asarray_chkfinite(B)\n else:\n A = np.asanyarray(A)\n B = np.asanyarray(B)\n if A.ndim != 2:\n raise ValueError(f'expected ndim to be 2, but observed {A.ndim}')\n if A.shape != B.shape:\n raise ValueError(f'the shapes of A and B differ ({A.shape} vs {B.shape})')\n u, w, vt = svd((B.T @ np.conjugate(A)).T)\n R = u @ vt\n scale = w.sum()\n return (R, scale)", + "docstring": "Compute the matrix solution of the orthogonal (or unitary) Procrustes problem. Given matrices and of the same shape, find an orthogonal (or unitary in the case of complex input) matrix that most closely maps to using the algorithm given in [1]_. Parameters ---------- A : (M, N) array_like Matrix to be mapped. B : (M, N) array_like Target matrix. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- R : (N, N) ndarray The matrix solution of the orthogonal Procrustes problem. Minimizes the Frobenius norm of `10.1007/BF02289451orthogonal_procrustes`. 
>>> R, _ = orthogonal_procrustes(A, B) >>> np.allclose(R, Q) True", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_procrustes.py", + "ast_data": "FunctionDef name:orthogonal_procrustes arg:A arg:B arg:check_finite arguments arg arg arg If Assign Call Assign Call Assign Call Assign Call If Compare Raise Call If Compare Raise Call Assign Call Call Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "reduce_acc_nodes_non_tensor_input_helper", + "source_code": "def reduce_acc_nodes_non_tensor_input_helper(self, cpu_worklist: NodeList):\n while cpu_worklist:\n node = cpu_worklist.pop(0)\n for user in node.users:\n if user in self.acc_nodes:\n self.acc_nodes.remove(user)\n if not is_node_output_tensor(user):\n cpu_worklist.append(user)", + "docstring": "Transitively excludes nodes from ACC supported set. For every node in the worklist: - removes its downstream ACC nodes from ACC supported set, - if any downstream ACC node produces non-tensor output, then it gets added into the worklist.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py", + "ast_data": "FunctionDef name:reduce_acc_nodes_non_tensor_input_helper arg:self arg:cpu_worklist arguments arg arg While Assign Call For If Compare Call If Call Call" + }, + { + "library": "pytorch", + "name": "impl_abstract", + "source_code": "@deprecated('`torch.library.impl_abstract` was renamed to `torch.library.register_fake`. Please use that instead; we will remove `torch.library.impl_abstract` in a future version of PyTorch.', category=FutureWarning)\ndef impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):\n if func is not None:\n _stacklevel = _stacklevel + 1\n return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel)", + "docstring": "This API was renamed to :func: in PyTorch 2.4. Please use that instead.", + "type": "function", + "file_path": "pytorch\\torch\\library.py", + "ast_data": "FunctionDef name:impl_abstract arg:qualname arg:func arguments arg arg arg arg If Compare Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self) -> str:\n return self._name", + "docstring": "Name of this tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "identity", + "source_code": "def identity(self, x: T) -> T:\n raise NotImplementedError", + "docstring": "Returns x as is. 
This is used to trigger CSE.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:identity arg:self arg:x arguments arg arg Raise" + }, + { + "library": "sphinx", + "name": "prefixed_warnings", + "source_code": "@contextmanager\ndef prefixed_warnings(prefix: str) -> Iterator[None]:\n logger = logging.getLogger(NAMESPACE)\n warning_handler = None\n for handler in logger.handlers:\n if isinstance(handler, WarningStreamHandler):\n warning_handler = handler\n break\n else:\n yield\n return\n prefix_filter = None\n for _filter in warning_handler.filters:\n if isinstance(_filter, MessagePrefixFilter):\n prefix_filter = _filter\n break\n if prefix_filter:\n try:\n previous = prefix_filter.prefix\n prefix_filter.prefix = prefix\n yield\n finally:\n prefix_filter.prefix = previous\n else:\n prefix_filter = MessagePrefixFilter(prefix)\n try:\n warning_handler.addFilter(prefix_filter)\n yield\n finally:\n warning_handler.removeFilter(prefix_filter)", + "docstring": "Context manager to prepend prefix to all warning log records temporarily. For example:: >>> with prefixed_warnings(\"prefix:\"): >>> logger.warning('Warning message!') # => prefix: Warning message! .. versionadded:: 2.0", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\logging.py", + "ast_data": "FunctionDef name:prefixed_warnings arg:prefix arguments arg Assign Call Assign For If Call Assign Return return:no Assign For If Call Assign If Try Assign Assign Assign Assign Call Try Call Call" + }, + { + "library": "scipy", + "name": "get_arrays_tol", + "source_code": "def get_arrays_tol(*arrays):\n if len(arrays) == 0:\n raise ValueError('At least one array must be provided.')\n size = max((array.size for array in arrays))\n weight = max((np.max(np.abs(array[np.isfinite(array)]), initial=1.0) for array in arrays))\n return 10.0 * EPS * max(size, 1.0) * weight", + "docstring": "Get a relative tolerance for a set of arrays. Borrowed from COBYQA Parameters ---------- *arrays: tuple Set of to get the tolerance for. Returns ------- float Relative tolerance for the set of arrays. Raises ------ ValueError If no array is provided.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\linalg.py", + "ast_data": "FunctionDef name:get_arrays_tol arguments arg If Compare Call Raise Call Assign Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "InvalidVersion", + "source_code": "class InvalidVersion(ValueError):\n pass", + "docstring": "Raised when a version string is not a valid version. >>> Version(\"invalid\") Traceback (most recent call last): ... packaging.version.InvalidVersion: Invalid version: 'invalid'", + "type": "class", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "ClassDef name:InvalidVersion" + }, + { + "library": "tensorflow", + "name": "bincount_v1", + "source_code": "@tf_export(v1=['math.bincount', 'bincount'])\n@deprecation.deprecated_endpoints('bincount')\ndef bincount_v1(arr, weights=None, minlength=None, maxlength=None, dtype=dtypes.int32):\n return bincount(arr, weights, minlength, maxlength, dtype)", + "docstring": "Counts the number of occurrences of each value in an integer array. If and are not given, returns a vector with length if is non-empty, and length 0 otherwise. If are non-None, then index of the output stores the sum of the value in at each index where the corresponding value in is . 
Args: arr: An int32 tensor of non-negative values. weights: If non-None, must be the same shape as arr. For each value in , the bin will be incremented by the corresponding weight instead of 1. minlength: If given, ensures the output has length at least , padding with zeros at the end if necessary. maxlength: If given, skips values in that are equal or greater than , ensuring that the output has length at most . dtype: If is None, determines the type of the output bins. Returns: A vector with the same dtype as or the given . The bin values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\bincount_ops.py", + "ast_data": "FunctionDef name:bincount_v1 arg:arr arg:weights arg:minlength arg:maxlength arg:dtype arguments arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "model_to_dict", + "source_code": "def model_to_dict(instance, fields=None, exclude=None):\n opts = instance._meta\n data = {}\n for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):\n if not getattr(f, 'editable', False):\n continue\n if fields is not None and f.name not in fields:\n continue\n if exclude and f.name in exclude:\n continue\n data[f.name] = f.value_from_object(instance)\n return data", + "docstring": "Return a dict containing the data in `` argument.", + "type": "function", + "file_path": "django\\django\\forms\\models.py", + "ast_data": "FunctionDef name:model_to_dict arg:instance arg:fields arg:exclude arguments arg arg arg Assign Assign For Call If Call If BoolOp Compare Compare If BoolOp Compare Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "kind", + "source_code": "@property\ndef kind(self):\n return getattr(self.queryables.get(self.lhs), 'kind', None)", + "docstring": "the kind of my field", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\pytables.py", + "ast_data": "FunctionDef name:kind arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_maybe_get_dtype", + "source_code": "def _maybe_get_dtype(x):\n if isinstance(x, numbers.Real):\n return x\n if isinstance(x, indexed_slices.IndexedSlices) or tensor_util.is_tf_type(x):\n return _to_numpy_type(x.dtype)\n if isinstance(x, dtypes.DType):\n return x.as_numpy_dtype\n if isinstance(x, (list, tuple)):\n raise ValueError(f'Cannot find dtype for type inference from argument `x` of a sequence type {type(x)}. For sequences, please call this function on each element individually.')\n return x", + "docstring": "Returns a numpy type if available from x. 
Skips if x is numpy.ndarray.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py", + "ast_data": "FunctionDef name:_maybe_get_dtype arg:x arguments arg If Call Return return:yes If BoolOp Call Call Return return:yes Call If Call Return return:yes If Call Raise Call Call Return return:yes" + }, + { + "library": "cryptography", + "name": "public_key", + "source_code": "@abc.abstractmethod\ndef public_key(self) -> RSAPublicKey:\n pass", + "docstring": "The RSAPublicKey associated with this private key.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py", + "ast_data": "FunctionDef name:public_key arg:self arguments arg" + }, + { + "library": "authlib", + "name": "get_jwt_config", + "source_code": "def get_jwt_config(self, grant):\n raise NotImplementedError()", + "docstring": "Get the JWT configuration for OpenIDCode extension. The JWT configuration will be used to generate ``. Developers MUST implement this method in subclass, e.g.:: def get_jwt_config(self, grant): return { \"key\": read_private_key_file(key_path), \"alg\": \"RS256\", \"iss\": \"issuer-identity\", \"exp\": 3600, } :param grant: AuthorizationCodeGrant instance :return: dict", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\grants\\code.py", + "ast_data": "FunctionDef name:get_jwt_config arg:self arg:grant arguments arg arg Raise Call" + }, + { + "library": "django", + "name": "set_as_test_mirror", + "source_code": "def set_as_test_mirror(self, primary_settings_dict):\n self.connection.settings_dict['NAME'] = primary_settings_dict['NAME']", + "docstring": "Set this database up to be used in testing as a mirror of a primary database whose settings are given.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\creation.py", + "ast_data": "FunctionDef name:set_as_test_mirror arg:self arg:primary_settings_dict arguments arg arg Assign" + }, + { + "library": "scipy", + "name": "poles", + "source_code": "@property\ndef poles(self):\n return self.to_zpk().poles", + "docstring": "Poles of the system.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:poles arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_calculate_scores", + "source_code": "def _calculate_scores(self, query, key):\n return NotImplementedError", + "docstring": "Calculates attention scores. Args: query: Query tensor of shape . key: Key tensor of shape . Returns: Tensor of shape .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\dense_attention.py", + "ast_data": "FunctionDef name:_calculate_scores arg:self arg:query arg:key arguments arg arg arg Return return:yes" + }, + { + "library": "scipy", + "name": "_qmvt", + "source_code": "def _qmvt(m, nu, covar, low, high, rng, lattice='cbc', n_batches=10):\n sn = max(1.0, np.sqrt(nu))\n low = np.asarray(low, dtype=np.float64)\n high = np.asarray(high, dtype=np.float64)\n cho, lo, hi = _permuted_cholesky(covar, low / sn, high / sn)\n n = cho.shape[0]\n q, n_qmc_samples = _cbc_lattice(n, max(m // n_batches, 1))\n rndm = rng.random(size=(n_batches, n))\n prob, est_error, n_samples = _qmvt_inner(q, rndm, int(n_qmc_samples), int(n_batches), cho, lo, hi, float(nu))\n return (prob, est_error, n_samples)", + "docstring": "Multivariate t integration over box bounds. Parameters ---------- m : int > n_batches The number of points to sample. 
This number will be divided into batches that apply random offsets of the sampling lattice for each batch in order to estimate the error. nu : float >= 0 The shape parameter of the multivariate t distribution. covar : (n, n) float array Possibly singular, positive semidefinite symmetric covariance matrix. low, high : (n,) float array The low and high integration bounds. rng : Generator, optional default_rng(), yada, yada lattice : 'cbc' or callable The type of lattice rule to use to construct the integration points. n_batches : int > 0, optional The number of QMC batches to apply. Returns ------- prob : float The estimated probability mass within the bounds. est_error : float 3 times the standard error of the batch estimates. n_samples : int The number of samples actually used.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_qmvnt.py", + "ast_data": "FunctionDef name:_qmvt arg:m arg:nu arg:covar arg:low arg:high arg:rng arg:lattice arg:n_batches arguments arg arg arg arg arg arg arg arg Assign Call Call Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "Problem14", + "source_code": "class Problem14(Benchmark):\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n self._bounds = [(0.0, 4.0)]\n self.global_optimum = 0.224885\n self.fglob = -0.788685\n\n def fun(self, x, *args):\n self.nfev += 1\n x = x[0]\n return -exp(-x) * sin(2.0 * pi * x)", + "docstring": "Univariate Problem14 objective function. This class defines the Univariate Problem14 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem14}}(x) = -e^{-x} \\sin(2\\pi x) Bound constraints: :math: .. figure:: figures/Problem14.png :alt: Univariate Problem14 function :align: center **Univariate Problem14 function** *Global optimum*: :math: for :math:", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", + "ast_data": "ClassDef name:Problem14 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "assert_non_positive_v2", + "source_code": "@tf_export('debugging.assert_non_positive', v1=[])\n@dispatch.add_dispatch_support\ndef assert_non_positive_v2(x, message=None, summarize=None, name=None):\n return assert_non_positive(x=x, summarize=summarize, message=message, name=name)", + "docstring": "Assert the condition holds element-wise. This Op checks that holds for every element of . If is empty, this is trivially satisfied. If is not <= 0 everywhere, , as well as the first entries of are printed, and is raised. Args: x: Numeric . message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to \"assert_non_positive\". Returns: Op raising unless is all non-positive. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and is False. 
The check can be performed immediately during eager execution or if is statically known.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:assert_non_positive_v2 arg:x arg:message arg:summarize arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_add_fixed_len_feature", + "source_code": "def _add_fixed_len_feature(self, key, feature):\n if not feature.dtype:\n raise ValueError(f'Missing type for feature {key}. Received feature={feature}.')\n if feature.shape is None:\n raise ValueError(f'Missing shape for feature {key}. Received feature={feature}.')\n feature_tensor_shape = tensor_shape.as_shape(feature.shape)\n if feature.shape and feature_tensor_shape.ndims and (feature_tensor_shape.dims[0].value is None):\n raise ValueError(f'First dimension of shape for feature {key} unknown. Consider using FixedLenSequenceFeature. Received feature={feature}.')\n if feature.shape is not None and (not feature_tensor_shape.is_fully_defined()):\n raise ValueError(f'All dimensions of shape for feature {key} need to be known but received {feature.shape!s}.')\n self.dense_keys.append(key)\n self.dense_shapes.append(tensor_shape.as_shape(feature.shape))\n self.dense_types.append(feature.dtype)\n if feature.default_value is not None:\n self.dense_defaults[key] = feature.default_value", + "docstring": "Adds a FixedLenFeature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py", + "ast_data": "FunctionDef name:_add_fixed_len_feature arg:self arg:key arg:feature arguments arg arg arg If Raise Call If Compare Raise Call Assign Call If BoolOp Compare Raise Call If BoolOp Compare Call Raise Call Call Call Call Call If Compare Assign" + }, + { + "library": "scikit-learn", + "name": "TransformerMixin", + "source_code": "class TransformerMixin(_SetOutputMixin):\n\n def __sklearn_tags__(self):\n tags = super().__sklearn_tags__()\n tags.transformer_tags = TransformerTags()\n return tags\n\n def fit_transform(self, X, y=None, **fit_params):\n if _routing_enabled():\n transform_params = self.get_metadata_routing().consumes(method='transform', params=fit_params.keys())\n if transform_params:\n warnings.warn(f\"This object ({self.__class__.__name__}) has a `transform` method which consumes metadata, but `fit_transform` does not forward metadata to `transform`. Please implement a custom `fit_transform` method to forward metadata to `transform` as well. Alternatively, you can explicitly do `set_transform_request`and set all values to `False` to disable metadata routed to `transform`, if that's an option.\", UserWarning)\n if y is None:\n return self.fit(X, **fit_params).transform(X)\n else:\n return self.fit(X, y, **fit_params).transform(X)", + "docstring": "Mixin class for all transformers in scikit-learn. This mixin defines the following functionality: - a method that delegates to and ; - a method to output as a specific container type. If :term: is defined, then :class: will automatically wrap and to follow the API. See the :ref: for details. :class: and :class: are helpful mixins for defining :term:. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, TransformerMixin >>> class MyTransformer(TransformerMixin, BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... return self ... def transform(self, X): ... 
return np.full(shape=len(X), fill_value=self.param) >>> transformer = MyTransformer() >>> X = [[1, 2], [2, 3], [3, 4]] >>> transformer.fit_transform(X) array([1, 1, 1])", + "type": "class", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "ClassDef name:TransformerMixin FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Call Return return:yes FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg arg If Call Assign Call Call Call If Call If Compare Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "must_record_gradient", + "source_code": "def must_record_gradient():\n return False", + "docstring": "Import backprop if you want gradients recorded.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\execute.py", + "ast_data": "FunctionDef name:must_record_gradient arguments Return return:yes" + }, + { + "library": "pytorch", + "name": "_sync_param_groups", + "source_code": "@staticmethod\ndef _sync_param_groups(src_param_groups: list[dict[Any, Any]], dst_param_groups: list[dict[Any, Any]]) -> None:\n assert len(src_param_groups) == len(dst_param_groups), 'Mismatch between number of source and destination parameter groups'\n for src_param_group, dst_param_group in zip(src_param_groups, dst_param_groups):\n for attr in filter(lambda x: x != 'params', src_param_group.keys()):\n dst_param_group[attr] = src_param_group[attr]", + "docstring": "Sync the attributes from the source parameter groups to the destination parameter groups. Example attributes include learning rate or scheduler attributes. The two parameter groups should have the same length (i.e. same number of parameter groups). Arguments: src_param_groups (list[dict]): parameter groups giving the attribute settings to copy. dst_param_groups (list[dict]): parameter groups giving the attribute settings to set.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py", + "ast_data": "FunctionDef name:_sync_param_groups arg:src_param_groups arg:dst_param_groups arguments arg arg Compare Call Call For Call For Call arguments arg Compare Call Assign" + }, + { + "library": "pytorch", + "name": "manual_seed_all", + "source_code": "def manual_seed_all(seed: int) -> None:\n seed = int(seed)\n\n def cb():\n for i in range(device_count()):\n default_generator = torch.xpu.default_generators[i]\n default_generator.manual_seed(seed)\n _lazy_call(cb, seed_all=True)", + "docstring": "Set the seed for generating random numbers on all GPUs. It's safe to call this function if XPU is not available; in that case, it is silently ignored. 
Args: seed (int): The desired seed.", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\random.py", + "ast_data": "FunctionDef name:manual_seed_all arg:seed arguments arg Assign Call FunctionDef name:cb arguments For Call Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "hook_with_zero_interleaved_fn", + "source_code": "def hook_with_zero_interleaved_fn(state, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n fut = hook(state, bucket)\n _hook_with_zero_step_setup(ddp_ref, zero, bucket)\n if zero._overlap_info.status != _OverlapStatus.INITIALIZED:\n return fut\n\n def zero_step(fut: torch.futures.Future) -> torch.Tensor:\n overlap_info = zero._overlap_info\n bucket_index = bucket.index()\n rank = zero.global_rank\n assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index]\n overlap_info.bucket_indices_seen.append(bucket_index)\n if rank in assigned_ranks:\n _perform_local_step(bucket, zero, rank)\n _broadcast_bucket(bucket_index, zero)\n num_buckets = len(overlap_info.params_per_bucket)\n if len(overlap_info.bucket_indices_seen) == num_buckets:\n overlap_info.wait_for_broadcasts()\n overlap_info.clear_per_iter_info()\n return bucket.buffer()\n return fut.then(zero_step)", + "docstring": "Return :class: that gives gradient bucket tensor and performs partial :class: :meth:. This function uses the gradients in gradient in given bucket to perform a partial :class: :meth: Arguments: state: any state for the hook. bucket (dist.GradBucket): the :class: gradient bucket.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\ddp_zero_hook.py", + "ast_data": "FunctionDef name:hook_with_zero_interleaved_fn arg:state arg:bucket arguments arg arg Assign Call Call If Compare Return return:yes FunctionDef name:zero_step arg:fut arguments arg Assign Assign Call Assign Assign Call If Compare Call Call Assign Call If Compare Call Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "catalog", + "source_code": "def catalog():\n global _default\n t = getattr(_active, 'value', None)\n if t is not None:\n return t\n if _default is None:\n _default = translation(settings.LANGUAGE_CODE)\n return _default", + "docstring": "Return the current active catalog for further processing. This can be used if you need to modify the catalog or want to access the whole message catalog instead of just translating one string.", + "type": "function", + "file_path": "django\\django\\utils\\translation\\trans_real.py", + "ast_data": "FunctionDef name:catalog arguments Assign Call If Compare Return return:yes If Compare Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "dos2unix", + "source_code": "def dos2unix(file):\n if os.path.isdir(file):\n print(file, 'Directory!')\n return\n with open(file, 'rb') as fp:\n data = fp.read()\n if '\\x00' in data:\n print(file, 'Binary!')\n return\n newdata = re.sub('\\r\\n', '\\n', data)\n if newdata != data:\n print('dos2unix:', file)\n with open(file, 'wb') as f:\n f.write(newdata)\n return file\n else:\n print(file, 'ok')", + "docstring": "Replace CRLF with LF in argument files. 
Print names of changed files.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\line_endings.py", + "ast_data": "FunctionDef name:dos2unix arg:file arguments arg If Call Call Return return:no With Call Assign Call If Compare Call Return return:no Assign Call If Compare Call With Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_path", + "source_code": "def get_path(self):\n _path, fillable = self._get_path_in_displaycoord()\n if np.iterable(fillable):\n _path = Path.make_compound_path(*_path)\n return self.get_transform().inverted().transform_path(_path)", + "docstring": "Return the path of the arrow in the data coordinates.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_path arg:self arguments arg Assign Call If Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_InitializeValues", + "source_code": "def _InitializeValues(self, values):\n self._values = set()\n for x in values:\n if isinstance(x, tensor_lib.Tensor):\n self._values.add(x.name)\n else:\n raise TypeError(f\"'values' must be a list of Tensors. Received: {type(x)}.\")", + "docstring": "Makes the values known to this context.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:_InitializeValues arg:self arg:values arguments arg arg Assign Call For If Call Call Raise Call Call" + }, + { + "library": "pandas", + "name": "InvalidVersion", + "source_code": "class InvalidVersion(ValueError):\n pass", + "docstring": "An invalid version was found, users should refer to PEP 440. The `` exception is raised when a version string is improperly formatted. Pandas uses this exception to ensure that all version strings are PEP 440 compliant. See Also -------- util.version.Version : Class for handling and parsing version strings. Examples -------- >>> pd.util.version.Version(\"1.\") Traceback (most recent call last): InvalidVersion: Invalid version: '1.'", + "type": "class", + "file_path": "pandas\\pandas\\util\\version\\__init__.py", + "ast_data": "ClassDef name:InvalidVersion" + }, + { + "library": "pytorch", + "name": "_PositiveSemidefinite", + "source_code": "class _PositiveSemidefinite(_Symmetric):\n\n def check(self, value):\n sym_check = super().check(value)\n if not sym_check.all():\n return sym_check\n return torch.linalg.eigvalsh(value).ge(0).all(-1)", + "docstring": "Constrain to positive-semidefinite matrices.", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:_PositiveSemidefinite FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Call If Call Return return:yes Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "is_enabled", + "source_code": "@staticmethod\ndef is_enabled():\n return torch._C._check_sparse_tensor_invariants()", + "docstring": "Return True if the sparse tensor invariants checking is enabled. .. 
note:: Use :func: or :func: to manage the state of the sparse tensor invariants checks.", + "type": "method", + "file_path": "pytorch\\torch\\sparse\\__init__.py", + "ast_data": "FunctionDef name:is_enabled arguments Return return:yes Call" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, origin: Tensor, direction: Tensor) -> None:\n super().__init__()\n self._origin = Parameter(origin)\n self._direction = Parameter(direction)", + "docstring": "Initialize a parametrized line of direction and origin. Args: origin: any point on the line of any dimension. direction: the normalized vector direction of any dimension. Example: >>> o = torch.tensor([0.0, 0.0]) >>> d = torch.tensor([1.0, 1.0]) >>> l = ParametrizedLine(o, d)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\line.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:origin arg:direction arguments arg arg arg Call Call Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "_add_fixed_qparams_to_dtype_configs", + "source_code": "def _add_fixed_qparams_to_dtype_configs(dtype_configs: list[DTypeConfig], constraints: DTypeWithConstraints) -> list[DTypeConfig]:\n new_dtype_configs = []\n for dtype_config in dtype_configs:\n dc = copy.deepcopy(dtype_config)\n for orig_constraints in [dc.input_dtype_with_constraints, dc.output_dtype_with_constraints]:\n if orig_constraints.dtype != constraints.dtype:\n continue\n if orig_constraints.scale_min_lower_bound is not None:\n raise ValueError(f'scale_min_lower_bound is invalid for fixed qparams ops: {dtype_config}')\n if orig_constraints.scale_max_upper_bound is not None:\n raise ValueError(f'scale_max_upper_bound is invalid for fixed qparams ops: {dtype_config}')\n orig_constraints.quant_min_lower_bound = constraints.quant_min_lower_bound\n orig_constraints.quant_max_upper_bound = constraints.quant_max_upper_bound\n orig_constraints.scale_exact_match = constraints.scale_exact_match\n orig_constraints.zero_point_exact_match = constraints.zero_point_exact_match\n new_dtype_configs.append(dc)\n return new_dtype_configs", + "docstring": "Return a copy of the list of DTypeConfigs where activations are subject to the specified constraints required for fixed qparams ops. If the data type doesn't match the one in the constraints, simply leave the corresponding DTypeConfig unchanged. 
If or is specified in the activations, throw an exception since these settings are incompatible with fixed qparams ops.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\_common_operator_config_utils.py", + "ast_data": "FunctionDef name:_add_fixed_qparams_to_dtype_configs arg:dtype_configs arg:constraints arguments arg arg Assign For Assign Call For If Compare If Compare Raise Call If Compare Raise Call Assign Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "group_fn", + "source_code": "def group_fn(self, sizes: Sequence[Sequence[sympy.Expr]]) -> tuple[tuple[sympy.Expr, ...], ...]:\n raise NotImplementedError", + "docstring": "Process the iteration sizes in case a transformation needs to be applied.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:group_fn arg:self arg:sizes arguments arg arg Raise" + }, + { + "library": "django", + "name": "alter_db_tablespace", + "source_code": "def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):\n self.execute(self.sql_retablespace_table % {'table': self.quote_name(model._meta.db_table), 'old_tablespace': self.quote_name(old_db_tablespace), 'new_tablespace': self.quote_name(new_db_tablespace)})", + "docstring": "Move a model's table between tablespaces.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\schema.py", + "ast_data": "FunctionDef name:alter_db_tablespace arg:self arg:model arg:old_db_tablespace arg:new_db_tablespace arguments arg arg arg arg Call Call Call Call" + }, + { + "library": "django", + "name": "z", + "source_code": "@z.setter\ndef z(self, value):\n if not self.hasz:\n raise GEOSException('Cannot set Z on 2D Point.')\n self._cs.setOrdinate(2, 0, value)", + "docstring": "Set the Z component of the Point.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\point.py", + "ast_data": "FunctionDef name:z arg:self arg:value arguments arg arg If Raise Call Call" + }, + { + "library": "tensorflow", + "name": "LogicalDeviceConfiguration", + "source_code": "@tf_export('config.LogicalDeviceConfiguration', 'config.experimental.VirtualDeviceConfiguration')\nclass LogicalDeviceConfiguration(collections.namedtuple('LogicalDeviceConfiguration', ['memory_limit', 'experimental_priority', 'experimental_device_ordinal'])):\n\n def __new__(cls, memory_limit=None, experimental_priority=None, experimental_device_ordinal=None):\n return super().__new__(cls, memory_limit, experimental_priority, experimental_device_ordinal)", + "docstring": "Configuration class for a logical devices. The class specifies the parameters to configure a as it is initialized to a during runtime initialization. Not all fields are valid for all device types. See and for usage examples. Fields: memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual device. Currently only supported for GPUs. experimental_priority: (optional) Priority to assign to a virtual device. Lower values have higher priorities and 0 is the default. Within a physical GPU, the GPU scheduler will prioritize ops on virtual devices with higher priority. Currently only supported for Nvidia GPUs. experimental_device_ordinal: (optional) Ordinal number to order the virtual device. LogicalDevice with lower ordinal number will receive a lower device id. Physical device id and location in the list is used to break ties. 
Currently only supported for Nvidia GPUs.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "ClassDef name:LogicalDeviceConfiguration Call FunctionDef name:__new__ arg:cls arg:memory_limit arg:experimental_priority arg:experimental_device_ordinal arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n feature_names = []\n for key in _collect_leaf_level_keys(self):\n if isinstance(key, (fc_types.FeatureColumn, fc_old._FeatureColumn)):\n feature_names.append(key.name)\n else:\n feature_names.append(key)\n return '_X_'.join(sorted(feature_names))", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Assign For Call If Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "verbose", + "source_code": "class verbose:\n\n def __init__(self, level):\n self.level = level\n\n def __enter__(self):\n if self.level == VERBOSE_OFF:\n return\n st = torch._C._verbose.mkldnn_set_verbose(self.level)\n assert st, 'Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope.'\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)\n return False", + "docstring": "On-demand oneDNN (former MKL-DNN) verbosing functionality. To make it easier to debug performance issues, oneDNN can dump verbose messages containing information like kernel size, input data size and execution duration while executing the kernel. The verbosing functionality can be invoked via an environment variable named . However, this methodology dumps messages in all steps. Those are a large amount of verbose messages. Moreover, for investigating the performance issues, generally taking verbose messages for one single iteration is enough. This on-demand verbosing functionality makes it possible to control scope for verbose message dumping. In the following example, verbose messages will be dumped out for the second inference only. .. highlight:: python .. 
code-block:: python import torch model(data) with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON): model(data) Args: level: Verbose level - ``: Enable verbosing, including oneDNN kernel creation", + "type": "class", + "file_path": "pytorch\\torch\\backends\\mkldnn\\__init__.py", + "ast_data": "ClassDef name:verbose FunctionDef name:__init__ arg:self arg:level arguments arg arg Assign FunctionDef name:__enter__ arg:self arguments arg If Compare Return return:no Assign Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_unpack_tensor", + "source_code": "def _unpack_tensor(self, parallel_tensor):\n if not isinstance(parallel_tensor, (tensor_lib.Tensor, composite_tensor.CompositeTensor, variables.Variable)):\n raise ValueError('Expected a tensor, got {}.'.format(parallel_tensor))\n with ops.device(self._name):\n return tpu_ops.tpu_replicated_output(parallel_tensor, num_replicas=len(self.components))", + "docstring": "Helper to unpack a single tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py", + "ast_data": "FunctionDef name:_unpack_tensor arg:self arg:parallel_tensor arguments arg arg If Call Raise Call Call With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "register_type_spec_from_value_converter", + "source_code": "def register_type_spec_from_value_converter(type_object, converter_fn, allow_subclass=False):\n _, type_object = tf_decorator.unwrap(type_object)\n _TYPE_CONVERSION_FUNCTION_REGISTRY.append((type_object, converter_fn, allow_subclass))", + "docstring": "Registers a function for converting values with a given type to TypeSpecs. If multiple registered s match a value, then the most recent registration takes precedence. Custom converters should not be defined for s; use instead. Args: type_object: A Python object representing the type of values accepted by . converter_fn: A function that takes one argument (an instance of the type represented by ) and returns a . allow_subclass: If true, then use to check for matches. 
If false, then use .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:register_type_spec_from_value_converter arg:type_object arg:converter_fn arg:allow_subclass arguments arg arg arg Assign Call Call" + }, + { + "library": "numpy", + "name": "get_mathlibs", + "source_code": "def get_mathlibs(path=None):\n if path is not None:\n config_file = os.path.join(path, '_numpyconfig.h')\n else:\n dirs = get_numpy_include_dirs()\n for path in dirs:\n fn = os.path.join(path, '_numpyconfig.h')\n if os.path.exists(fn):\n config_file = fn\n break\n else:\n raise DistutilsError('_numpyconfig.h not found in numpy include dirs %r' % (dirs,))\n with open(config_file) as fid:\n mathlibs = []\n s = '#define MATHLIB'\n for line in fid:\n if line.startswith(s):\n value = line[len(s):].strip()\n if value:\n mathlibs.extend(value.split(','))\n return mathlibs", + "docstring": "Return the MATHLIB line from numpyconfig.h", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:get_mathlibs arg:path arguments arg If Compare Assign Call Assign Call For Assign Call If Call Assign Raise Call With Call Assign Assign For If Call Assign Call Call If Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "serialize_many_sparse_v2", + "source_code": "@tf_export('io.serialize_many_sparse', v1=[])\n@dispatch.add_dispatch_support\ndef serialize_many_sparse_v2(sp_input, out_type=dtypes.string, name=None):\n sp_input = _convert_to_sparse_tensor(sp_input)\n return gen_sparse_ops.serialize_many_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type)", + "docstring": "Serialize -minibatch into an . The must have rank greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the must be sorted in increasing order of this first dimension. The serialized objects going into each row of the output will have rank . The minibatch size is extracted from . Args: sp_input: The input rank . out_type: The to use for serialization. name: A name prefix for the returned tensors (optional). Returns: A matrix (2-D ) with rows and columns. Each column represents serialized 's indices, values, and shape (respectively). 
Raises: TypeError: If is not a .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:serialize_many_sparse_v2 arg:sp_input arg:out_type arg:name arguments arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "update_or_create", + "source_code": "def update_or_create(self, defaults=None, create_defaults=None, **kwargs):\n update_defaults = defaults or {}\n if create_defaults is None:\n create_defaults = update_defaults\n self._for_write = True\n with transaction.atomic(using=self.db):\n obj, created = self.select_for_update().get_or_create(create_defaults, **kwargs)\n if created:\n return (obj, created)\n for k, v in resolve_callables(update_defaults):\n setattr(obj, k, v)\n update_fields = set(update_defaults)\n concrete_field_names = self.model._meta._non_pk_concrete_field_names\n if concrete_field_names.issuperset(update_fields):\n pk_fields = self.model._meta.pk_fields\n for field in self.model._meta.local_concrete_fields:\n if not (field in pk_fields or field.__class__.pre_save is Field.pre_save):\n update_fields.add(field.name)\n if field.name != field.attname:\n update_fields.add(field.attname)\n obj.save(using=self.db, update_fields=update_fields)\n else:\n obj.save(using=self.db)\n return (obj, False)", + "docstring": "Look up an object with the given kwargs, updating one with defaults if it exists, otherwise create a new one. Optionally, an object can be created with different values than defaults by using create_defaults. Return a tuple (object, created), where created is a boolean specifying whether an object was created.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:update_or_create arg:self arg:defaults arg:create_defaults arguments arg arg arg arg Assign BoolOp If Compare Assign Assign With Call Assign Call Call If Return return:yes For Call Call Assign Call Assign If Call Assign For If BoolOp Compare Compare Call If Compare Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "rvs", + "source_code": "def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):\n dim, loc, shape, df = self._process_parameters(loc, shape, df)\n if random_state is not None:\n rng = check_random_state(random_state)\n else:\n rng = self._random_state\n if np.isinf(df):\n x = np.ones(size)\n else:\n x = rng.chisquare(df, size=size) / df\n z = rng.multivariate_normal(np.zeros(dim), shape, size=size)\n samples = loc + z / np.sqrt(x)[..., None]\n return _squeeze_output(samples)", + "docstring": "Draw random samples from a multivariate t-distribution. Parameters ---------- %(_mvt_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (, ), where is the dimension of the random variable. 
Examples -------- >>> from scipy.stats import multivariate_t >>> x = [0.4, 5] >>> loc = [0, 1] >>> shape = [[1, 0.1], [0.1, 1]] >>> df = 7 >>> multivariate_t.rvs(loc, shape, df) array([[0.93477495, 3.00408716]])", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:rvs arg:self arg:loc arg:shape arg:df arg:size arg:random_state arguments arg arg arg arg arg arg Assign Call If Compare Assign Call Assign If Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ConcreteFunctionGarbageCollector", + "source_code": "class ConcreteFunctionGarbageCollector:\n __slots__ = ['_func_graph']\n\n def __init__(self, func_graph):\n self._func_graph = func_graph\n\n def release(self):\n self._func_graph = None\n\n def __del__(self):\n if func_graph_module is None or self._func_graph is None:\n return\n try:\n func_graph_module.dismantle_func_graph(self._func_graph)\n except:\n pass", + "docstring": "Cleans up reference cycles when a goes out of scope.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "ClassDef name:ConcreteFunctionGarbageCollector Assign FunctionDef name:__init__ arg:self arg:func_graph arguments arg arg Assign FunctionDef name:release arg:self arguments arg Assign FunctionDef name:__del__ arg:self arguments arg If BoolOp Compare Compare Return return:no Try Call ExceptHandler" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "def fit_transform(self, X, y=None):\n self.fit(X)\n return self.embedding_", + "docstring": "Fit the model from data in X and transform X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. If affinity is \"precomputed\" X : {array-like, sparse matrix} of shape (n_samples, n_samples), Interpret X as precomputed adjacency graph computed from samples. y : Ignored Not used, present for API consistency by convention. Returns ------- X_new : array-like of shape (n_samples, n_components) Spectral embedding of the training matrix.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\manifold\\_spectral_embedding.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "dviFontName", + "source_code": "def dviFontName(self, dvifont):\n dvi_info = self._dviFontInfo.get(dvifont.texname)\n if dvi_info is not None:\n return dvi_info.pdfname\n tex_font_map = dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))\n psfont = tex_font_map[dvifont.texname]\n if psfont.filename is None:\n raise ValueError('No usable font file found for {} (TeX: {}); the font may lack a Type-1 version'.format(psfont.psname, dvifont.texname))\n pdfname = next(self._internal_font_seq)\n _log.debug('Assigning font %s = %s (dvi)', pdfname, dvifont.texname)\n self._dviFontInfo[dvifont.texname] = types.SimpleNamespace(dvifont=dvifont, pdfname=pdfname, fontfile=psfont.filename, basefont=psfont.psname, encodingfile=psfont.encoding, effects=psfont.effects)\n return pdfname", + "docstring": "Given a dvi font object, return a name suitable for Op.selectfont. 
This registers the font information internally (in ``) if not yet registered.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:dviFontName arg:self arg:dvifont arguments arg arg Assign Call If Compare Return return:yes Assign Call Call Assign If Compare Raise Call Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "all_parents", + "source_code": "@cached_property\ndef all_parents(self):\n result = OrderedSet(self.parents)\n for parent in self.parents:\n for ancestor in parent._meta.all_parents:\n result.add(ancestor)\n return tuple(result)", + "docstring": "Return all the ancestors of this model as a tuple ordered by MRO. Useful for determining if something is an ancestor, regardless of lineage.", + "type": "method", + "file_path": "django\\django\\db\\models\\options.py", + "ast_data": "FunctionDef name:all_parents arg:self arguments arg Assign Call For For Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "validate_distributed_dataset_inputs", + "source_code": "def validate_distributed_dataset_inputs(distribution_strategy, x, y, sample_weights=None):\n x_values_list = validate_per_replica_inputs(distribution_strategy, x)\n if y is not None:\n y_values_list = validate_per_replica_inputs(distribution_strategy, y)\n else:\n y_values_list = None\n if sample_weights is not None:\n sample_weights_list = validate_per_replica_inputs(distribution_strategy, sample_weights)\n else:\n sample_weights_list = None\n return (x_values_list, y_values_list, sample_weights_list)", + "docstring": "Validate all the components of a DistributedValue Dataset input. Args: distribution_strategy: The current DistributionStrategy used to call /. x: Input Dataset DistributedValue object. For example, when we use this is a PerReplica object with a tensor for each device set in the dict. x can also be a tuple or dict. The keys of the dict should match the names of the input layers of the model. y: Target Dataset DistributedValue object. For example, when we use this is a PerReplica object with a tensor for each device set in the dict. y can also be a tuple or dict. The keys of the dict should match the names of the output layers of the model. sample_weights: Sample weights Dataset DistributedValue object. For example, when we use this is a PerReplica object with a tensor for each device set in the dict. Returns: The unwrapped values list of the x and y DistributedValues inputs. Raises: ValueError: If x and y do not have support for being evaluated as tensors. 
or if x and y contain elements that are not tensors or if x and y contain elements that have a shape or dtype mismatch.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", + "ast_data": "FunctionDef name:validate_distributed_dataset_inputs arg:distribution_strategy arg:x arg:y arg:sample_weights arguments arg arg arg arg Assign Call If Compare Assign Call Assign If Compare Assign Call Assign Return return:yes" + }, + { + "library": "numpy", + "name": "expandtabs", + "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_expandtabs_dispatcher)\ndef expandtabs(a, tabsize=8):\n a = np.asanyarray(a)\n tabsize = np.asanyarray(tabsize)\n if a.dtype.char == 'T':\n return _expandtabs(a, tabsize)\n buffersizes = _expandtabs_length(a, tabsize)\n out_dtype = f'{a.dtype.char}{buffersizes.max()}'\n out = np.empty_like(a, shape=buffersizes.shape, dtype=out_dtype)\n return _expandtabs(a, tabsize, out=out)", + "docstring": "Return a copy of each string element where all tab characters are replaced by one or more spaces. Calls :meth: element-wise. Return a copy of each string element where all tab characters are replaced by one or more spaces, depending on the current column and the given . The column number is reset to zero after each newline occurring in the string. This doesn't understand other non-printing characters or escape sequences. Parameters ---------- a : array-like, with `tabsize` dtype, depending on input type See Also -------- str.expandtabs Examples -------- >>> import numpy as np >>> a = np.array([' Hello world']) >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP array([' Hello world'], dtype='>> info = np.__array_namespace_info__() >>> info.default_dtypes() {'real floating': numpy.float64, 'complex floating': numpy.complex128, 'integral': numpy.int64, 'indexing': numpy.int64}", + "type": "method", + "file_path": "numpy\\numpy\\_array_api_info.py", + "ast_data": "FunctionDef name:default_dtypes arg:self arguments arg arg If Compare Raise Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "make_composite_tensor", + "source_code": "def make_composite_tensor(cls, module_name='tf.linalg'):\n spec_name = '{}Spec'.format(cls.__name__)\n spec_type = type(spec_name, (_LinearOperatorSpec,), {'value_type': cls})\n type_spec_registry.register('{}.{}'.format(module_name, spec_name))(spec_type)\n cls._type_spec = property(spec_type.from_operator)\n return cls", + "docstring": "Class decorator to convert s to .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:make_composite_tensor arg:cls arg:module_name arguments arg arg Assign Call Assign Call Call Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ragged_op_signature", + "source_code": "def _ragged_op_signature(op, ragged_args, ragged_varargs=False):\n op_name = tf_export.get_canonical_name_for_symbol(op)\n argspec = tf_inspect.getfullargspec(op)\n arg_names = argspec.args\n for pos in ragged_args:\n arg_names[pos] = '**' + arg_names[pos] + '**'\n if argspec.defaults is not None:\n for pos in range(-1, -len(argspec.defaults) - 1, -1):\n arg_names[pos] += '=`{!r}`'.format(argspec.defaults[pos])\n if argspec.varargs:\n if ragged_varargs:\n arg_names.append('***' + argspec.varargs + '**')\n else:\n arg_names.append('*' + argspec.varargs)\n if argspec.varkw:\n arg_names.append('**' + 
argspec.varkw)\n return '* `tf.{}`({})'.format(op_name, ', '.join(arg_names))", + "docstring": "Returns a signature for the given op, marking ragged args in bold.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_dispatch.py", + "ast_data": "FunctionDef name:_ragged_op_signature arg:op arg:ragged_args arg:ragged_varargs arguments arg arg arg Assign Call Assign Call Assign For Assign If Compare For Call Call Call If If Call Call If Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "weight_intercept_raw", + "source_code": "def weight_intercept_raw(self, coef, X):\n weights, intercept = self.weight_intercept(coef)\n if not self.base_loss.is_multiclass:\n raw_prediction = X @ weights + intercept\n else:\n raw_prediction = X @ weights.T + intercept\n return (weights, intercept, raw_prediction)", + "docstring": "Helper function to get coefficients, intercept and raw_prediction. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order=\"F\"). X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. Returns ------- weights : ndarray of shape (n_features,) or (n_classes, n_features) Coefficients without intercept term. intercept : float or ndarray of shape (n_classes,) Intercept terms. raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_linear_loss.py", + "ast_data": "FunctionDef name:weight_intercept_raw arg:self arg:coef arg:X arguments arg arg arg Assign Call If Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "placeholder_value", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef placeholder_value(self, placeholder_context):\n return super().placeholder_value(placeholder_context)", + "docstring": "See tf.types.experimental.TraceType base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", + "ast_data": "FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "compressed", + "source_code": "def compressed(x):\n return asanyarray(x).compressed()", + "docstring": "Return all the non-masked data as a 1-D array. This function is equivalent to calling the \"compressed\" method of a , see for details. See Also -------- ma.MaskedArray.compressed : Equivalent method. 
Examples -------- >>> import numpy as np Create an array with negative values masked: >>> import numpy as np >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]]) >>> masked_x = np.ma.masked_array(x, mask=x >> masked_x masked_array( data=[[1, --, 0], [2, --, 3], [7, 4, --]], mask=[[False, True, False], [False, True, False], [False, False, True]], fill_value=999999) Compress the masked array into a 1-D array of non-masked values: >>> np.ma.compressed(masked_x) array([1, 0, 2, 3, 7, 4])", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:compressed arg:x arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_get_header_version", + "source_code": "def _get_header_version(path, name):\n for line in io.open(path, 'r', encoding='utf-8'):\n match = re.match('#define %s +(\\\\d+)' % name, line)\n if match:\n value = match.group(1)\n return int(value)\n raise ConfigError('#define \"{}\" is either\\n'.format(name) + ' not present in file {} OR\\n'.format(path) + ' its value is not an integer literal')", + "docstring": "Returns preprocessor defines in C header file.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_sycl_config.py", + "ast_data": "FunctionDef name:_get_header_version arg:path arg:name arguments arg arg For Call Assign Call If Assign Call Return return:yes Call Raise Call Call Call" + }, + { + "library": "pytorch", + "name": "manual_seed_all", + "source_code": "def manual_seed_all(seed: int) -> None:\n seed = int(seed)\n\n def cb():\n for i in range(device_count()):\n default_generator = torch.cuda.default_generators[i]\n default_generator.manual_seed(seed)\n _lazy_call(cb, seed_all=True)", + "docstring": "Set the seed for generating random numbers on all GPUs. It's safe to call this function if CUDA is not available; in that case, it is silently ignored. 
Args: seed (int): The desired seed.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\random.py", + "ast_data": "FunctionDef name:manual_seed_all arg:seed arguments arg Assign Call FunctionDef name:cb arguments For Call Call Assign Call Call" + }, + { + "library": "matplotlib", + "name": "draw_all", + "source_code": "@classmethod\ndef draw_all(cls, force=False):\n for manager in cls.get_all_fig_managers():\n if force or manager.canvas.figure.stale:\n manager.canvas.draw_idle()", + "docstring": "Redraw all stale managed figures, or, if *force* is True, all managed figures.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py", + "ast_data": "FunctionDef name:draw_all arg:cls arg:force arguments arg arg For Call If BoolOp Call" + }, + { + "library": "tensorflow", + "name": "_to_proto", + "source_code": "def _to_proto(self):\n raise NotImplementedError('{}._to_proto()'.format(type(self).__name__))", + "docstring": "Convert options to protocol buffer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\util\\options.py", + "ast_data": "FunctionDef name:_to_proto arg:self arguments arg Raise Call Call Call" + }, + { + "library": "pytorch", + "name": "pwlf_sac_tradeoff_curve", + "source_code": "def pwlf_sac_tradeoff_curve(self, n_segments: int=2, save_tradeoff_graphs: bool=False) -> None:\n for mod_fqn, sac_stats in self.sac_mod_stats.items():\n self.sac_mod_tradeoff_stats[mod_fqn] = self._get_sac_tradeoff_pwlf_stats(sac_stats=sac_stats, greedy_order_meta=self.sac_mod_greedy_order_meta[mod_fqn], n_segments=n_segments, save_tradeoff_graph=save_tradeoff_graphs, filename=mod_fqn)", + "docstring": "Fits a piecewise linear function with the specified sumber of segments to the SAC trade-off curve of discarded memory vs recomputation time. Args: n_segments (int, optional): The number of segments to be used for fitting the piecewise linear function to the trade-off curve. Defaults to 2. save_tradeoff_graphs (bool, optional): Whether to save the trade-off graphs to file. Defaults to False. 
If save_tradeoff_graphs is True, the trade-off graphs are saved to file using the module FQN as the filename.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py", + "ast_data": "FunctionDef name:pwlf_sac_tradeoff_curve arg:self arg:n_segments arg:save_tradeoff_graphs arguments arg arg arg For Call Assign Call" + }, + { + "library": "pandas", + "name": "get_locator", + "source_code": "def get_locator(self, dmin, dmax):\n tot_sec = (dmax - dmin).total_seconds()\n if abs(tot_sec) < self.minticks:\n self._freq = -1\n locator = MilliSecondLocator(self.tz)\n locator.set_axis(self.axis)\n locator.axis.set_view_interval(*self.axis.get_view_interval())\n locator.axis.set_data_interval(*self.axis.get_data_interval())\n return locator\n return mdates.AutoDateLocator.get_locator(self, dmin, dmax)", + "docstring": "Pick the best locator based on a distance.", + "type": "method", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py", + "ast_data": "FunctionDef name:get_locator arg:self arg:dmin arg:dmax arguments arg arg arg Assign Call If Compare Call Assign Assign Call Call Call Call Call Call Return return:yes Return return:yes Call" + }, + { + "library": "pandas", + "name": "_convert_to_side", + "source_code": "@classmethod\ndef _convert_to_side(cls, side_spec):\n from openpyxl.styles import Side\n _side_key_map = {'border_style': 'style'}\n if isinstance(side_spec, str):\n return Side(style=side_spec)\n side_kwargs = {}\n for k, v in side_spec.items():\n k = _side_key_map.get(k, k)\n if k == 'color':\n v = cls._convert_to_color(v)\n side_kwargs[k] = v\n return Side(**side_kwargs)", + "docstring": "Convert `` to an openpyxl v2 Side object. Parameters ---------- side_spec : str, dict A string specifying the border style, or a dict with zero or more of the following keys (or their synonyms). 'style' ('border_style') 'color' Returns ------- side : openpyxl.styles.Side", + "type": "method", + "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py", + "ast_data": "FunctionDef name:_convert_to_side arg:cls arg:side_spec arguments arg arg Assign If Call Return return:yes Call Assign For Call Assign Call If Compare Assign Call Assign Return return:yes Call" + }, + { + "library": "numpy", + "name": "cumprod", + "source_code": "@array_function_dispatch(_cumprod_dispatcher)\ndef cumprod(a, axis=None, dtype=None, out=None):\n return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)", + "docstring": "Return the cumulative product of elements along a given axis. Parameters ---------- a : array_like Input array. axis : int, optional Axis along which the cumulative product is computed. By default the input is flattened. dtype : dtype, optional Type of the returned array, as well as of the accumulator in which the elements are multiplied. If *dtype* is not specified, it defaults to the dtype of , unless has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used instead. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type of the resulting values will be cast if necessary. Returns ------- cumprod : ndarray A new array holding the result is returned unless is specified, in which case a reference to out is returned. 
See Also -------- cumulative_prod : Array API compatible alternative for `ufuncs-output-typeaa`: >>> np.cumprod(a,axis=1) array([[ 1, 2, 6], [ 4, 20, 120]])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\fromnumeric.py", + "ast_data": "FunctionDef name:cumprod arg:a arg:axis arg:dtype arg:out arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "reindex", + "source_code": "def reindex(self, target, method=None, level=None, limit: int | None=None, tolerance=None) -> tuple[Index, npt.NDArray[np.intp] | None]:\n if method is not None:\n raise NotImplementedError('argument method is not implemented for CategoricalIndex.reindex')\n if level is not None:\n raise NotImplementedError('argument level is not implemented for CategoricalIndex.reindex')\n if limit is not None:\n raise NotImplementedError('argument limit is not implemented for CategoricalIndex.reindex')\n return super().reindex(target)", + "docstring": "Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.Index Resulting index indexer : np.ndarray[np.intp] or None Indices of output values in original index", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\category.py", + "ast_data": "FunctionDef name:reindex arg:self arg:target arg:method arg:level arg:limit arg:tolerance arguments arg arg arg arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "rotate_from_to", + "source_code": "@classmethod\ndef rotate_from_to(cls, r1, r2):\n k = np.cross(r1, r2)\n nk = np.linalg.norm(k)\n th = np.arctan2(nk, np.dot(r1, r2))\n th /= 2\n if nk == 0:\n if np.dot(r1, r2) < 0:\n warnings.warn('Rotation defined by anti-parallel vectors is ambiguous')\n k = np.zeros(3)\n k[np.argmin(r1 * r1)] = 1\n k = np.cross(r1, k)\n k = k / np.linalg.norm(k)\n q = cls(0, k)\n else:\n q = cls(1, [0, 0, 0])\n else:\n q = cls(np.cos(th), k * np.sin(th) / nk)\n return q", + "docstring": "The quaternion for the shortest rotation from vector r1 to vector r2 i.e., q = sqrt(r2*r1'), normalized. 
If r1 and r2 are antiparallel, then the result is ambiguous; a normal vector will be returned, and a warning will be issued.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:rotate_from_to arg:cls arg:r1 arg:r2 arguments arg arg arg Assign Call Assign Call Assign Call Call If Compare If Compare Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_load_stack_frames", + "source_code": "def _load_stack_frames(self):\n stack_frames_iter = self._reader.stack_frames_iterator()\n for debug_event, _ in stack_frames_iter:\n stack_frame_with_id = debug_event.stack_frame_with_id\n file_line_col = stack_frame_with_id.file_line_col\n self._unprocessed_stack_frames[stack_frame_with_id.id] = file_line_col\n unprocessed_stack_frame_ids = tuple(self._unprocessed_stack_frames.keys())\n for stack_frame_id in unprocessed_stack_frame_ids:\n file_line_col = self._unprocessed_stack_frames[stack_frame_id]\n if len(self._host_name_file_path_to_offset) > file_line_col.file_index:\n host_name, file_path = list(self._host_name_file_path_to_offset.keys())[file_line_col.file_index]\n self._stack_frame_by_id[stack_frame_id] = (host_name, file_path, file_line_col.line, file_line_col.func)\n del self._unprocessed_stack_frames[stack_frame_id]", + "docstring": "Incrementally read the .stack_frames file. This must be called after _load_source_files(). It assumes that the following contract is honored by the writer of the tfdbg v2 data file set: - Before a stack frame is written to the .stack_frames file, the corresponding source file information must have been written to the .source_files file first.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:_load_stack_frames arg:self arguments arg Assign Call For Assign Assign Assign Assign Call Call For Assign If Compare Call Assign Call Call Assign" + }, + { + "library": "django", + "name": "get_distance", + "source_code": "def get_distance(self, f, dist_val, lookup_type):\n value = dist_val[0]\n geodetic = f.geodetic(self.connection)\n geography = f.geography\n if isinstance(value, Distance):\n if geography:\n dist_param = value.m\n elif geodetic:\n if lookup_type == 'dwithin':\n raise ValueError('Only numeric values of degree units are allowed on geographic DWithin queries.')\n dist_param = value.m\n else:\n dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))\n else:\n dist_param = value\n return [dist_param]", + "docstring": "Retrieve the distance parameters for the given geometry field, distance lookup value, and the distance lookup type. This is the most complex implementation of the spatial backends due to what is supported on geodetic geometry columns vs. what's available on projected geometry columns. 
In addition, it has to take into account the geography column type.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py", + "ast_data": "FunctionDef name:get_distance arg:self arg:f arg:dist_val arg:lookup_type arguments arg arg arg arg Assign Assign Call Assign If Call If Assign If If Compare Raise Call Assign Assign Call Call Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "equal_levels", + "source_code": "def equal_levels(self, other: MultiIndex) -> bool:\n if self.nlevels != other.nlevels:\n return False\n for i in range(self.nlevels):\n if not self.levels[i].equals(other.levels[i]):\n return False\n return True", + "docstring": "Return True if the levels of both MultiIndex objects are the same", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:equal_levels arg:self arg:other arguments arg arg If Compare Return return:yes For Call If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_check_reflection_axis", + "source_code": "def _check_reflection_axis(self, reflection_axis):\n if reflection_axis.shape.ndims is not None and reflection_axis.shape.ndims < 1:\n raise ValueError('Argument reflection_axis must have at least 1 dimension. Found: %s' % reflection_axis)", + "docstring": "Static check of reflection_axis.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_householder.py", + "ast_data": "FunctionDef name:_check_reflection_axis arg:self arg:reflection_axis arguments arg arg If BoolOp Compare Compare Raise Call" + }, + { + "library": "pytorch", + "name": "from_float", + "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n msg = ' nnq.' + cls.__name__ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__\n assert type(mod) == cls._FLOAT_MODULE, msg\n assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined.'\n weight_post_process = mod.qconfig.weight()\n weight_post_process(mod.weight)\n assert weight_post_process.dtype == torch.qint8, 'Weight observer must have a dtype of qint8'\n qweight = _quantize_weight(mod.weight.float(), weight_post_process)\n qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size, mod.stride, mod.padding, mod.output_padding, mod.groups, mod.bias is not None, mod.dilation, mod.padding_mode)\n qconv.set_weight_bias(qweight, mod.bias)\n if not hasattr(mod, 'activation_post_process') or mod.activation_post_process.dtype == torch.float:\n return qconv\n else:\n act_scale, act_zp = mod.activation_post_process.calculate_qparams()\n qconv.scale = float(act_scale)\n qconv.zero_point = int(act_zp)\n return qconv", + "docstring": "Creates a quantized module from a float module or qparams_dict. 
Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by the user", + "type": "method", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py", + "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Assign Compare Call Call Assign Call Call Compare Assign Call Call Assign Call Compare Call If BoolOp Call Compare Return return:yes Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_xy1", + "source_code": "def set_xy1(self, *args, **kwargs):\n params = _api.select_matching_signature([lambda self, x, y: locals(), lambda self, xy1: locals()], self, *args, **kwargs)\n if 'x' in params:\n _api.warn_deprecated('3.10', message='Passing x and y separately to AxLine.set_xy1 is deprecated since %(since)s; pass them as a single tuple instead.')\n xy1 = (params['x'], params['y'])\n else:\n xy1 = params['xy1']\n self._xy1 = xy1", + "docstring": "Set the *xy1* value of the line. Parameters ---------- xy1 : tuple[float, float] Points for the line to pass through.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:set_xy1 arg:self arguments arg arg arg Assign Call arguments arg arg arg Call arguments arg arg Call If Compare Call Assign Assign Assign" + }, + { + "library": "django", + "name": "import_proj", + "source_code": "def import_proj(self, proj):\n capi.from_proj(self.ptr, proj)", + "docstring": "Import the Spatial Reference from a PROJ string.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", + "ast_data": "FunctionDef name:import_proj arg:self arg:proj arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "get_torchgen_root", + "source_code": "def get_torchgen_root() -> Path:\n return Path(__file__).parent.resolve()", + "docstring": "If you're depending on torchgen out-of-tree, you can use the root to figure out the path to native_functions.yaml", + "type": "function", + "file_path": "pytorch\\torchgen\\gen.py", + "ast_data": "FunctionDef name:get_torchgen_root arguments Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "grad_index", + "source_code": "@property\ndef grad_index(self):\n return self._grad_index", + "docstring": "The loop index of backprop loop.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", + "ast_data": "FunctionDef name:grad_index arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n router = MetadataRouter(owner=self.__class__.__name__)\n router.add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='fit', callee='fit'))\n router.add(splitter=check_cv(self.cv), method_mapping=MethodMapping().add(caller='fit', callee='split'))\n router.add(scorer=self._get_scorer(), method_mapping=MethodMapping().add(caller='fit', callee='score').add(caller='score', callee='score'))\n return router", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. 
versionadded:: 1.6 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Call Call Call Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "null_count", + "source_code": "@property\n@abstractmethod\ndef null_count(self) -> int | None:\n pass", + "docstring": "Number of null elements, if known. Note: Arrow uses -1 to indicate \"unknown\", but None seems cleaner.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py", + "ast_data": "FunctionDef name:null_count arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "parse_example_spec", + "source_code": "@property\ndef parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(self.dtype)}", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py", + "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "log_loss", + "source_code": "@tf_export(v1=['losses.log_loss'])\n@dispatch.add_dispatch_support\ndef log_loss(labels, predictions, weights=1.0, epsilon=1e-07, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n if labels is None:\n raise ValueError('Argument `labels` must not be None.')\n if predictions is None:\n raise ValueError('Argument `predictions` must not be None.')\n with ops.name_scope(scope, 'log_loss', (predictions, labels, weights)) as scope:\n predictions = math_ops.cast(predictions, dtype=dtypes.float32)\n labels = math_ops.cast(labels, dtype=dtypes.float32)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n losses = -math_ops.multiply(labels, math_ops.log(predictions + epsilon)) - math_ops.multiply(1 - labels, math_ops.log(1 - predictions + epsilon))\n return compute_weighted_loss(losses, weights, scope, loss_collection, reduction=reduction)", + "docstring": "Adds a Log Loss term to the training procedure. acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If is a tensor of size , then the total loss for each sample of the batch is rescaled by the corresponding element in the vector. If the shape of matches the shape of , then the loss of each measurable element of is scaled by the corresponding value of . Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). epsilon: A small increment to add to avoid taking a log of zero. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float . If is , this has the same shape as ; otherwise, it is scalar. Raises: ValueError: If the shape of doesn't match that of or if the shape of is invalid. Also if or is None. @compatibility(eager) The argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a . 
@end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py", + "ast_data": "FunctionDef name:log_loss arg:labels arg:predictions arg:weights arg:epsilon arg:scope arg:loss_collection arg:reduction arguments arg arg arg arg arg arg arg If Compare Raise Call If Compare Raise Call With Call Assign Call Assign Call Call Call Call Assign Call Call Call Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "is_ndonnx_array", + "source_code": "def is_ndonnx_array(x: object) -> TypeIs[ndx.Array]:\n cls = cast(Hashable, type(x))\n return _issubclass_fast(cls, 'ndonnx', 'Array')", + "docstring": "Return True if is a ndonnx Array. This function does not import ndonnx if it has not already been imported and is therefore cheap to use. See Also -------- array_namespace is_array_api_obj is_numpy_array is_cupy_array is_ndonnx_array is_dask_array is_jax_array is_pydata_sparse_array", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", + "ast_data": "FunctionDef name:is_ndonnx_array arg:x arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "resolve_model_init_order", + "source_code": "def resolve_model_init_order(self):\n converter = connections[self.db].introspection.identifier_converter\n model_init_fields = [field for column_name, field in self.model_fields.items() if column_name in self.columns]\n annotation_fields = [(column, pos) for pos, column in enumerate(self.columns) if column not in self.model_fields]\n model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields]\n model_init_names = [f.attname for f in model_init_fields]\n return (model_init_names, model_init_order, annotation_fields)", + "docstring": "Resolve the init field names and value positions.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:resolve_model_init_order arg:self arguments arg Assign Assign Call Compare Assign Call Compare Assign Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "GlobalStepWaiterHook", + "source_code": "@tf_export(v1=['train.GlobalStepWaiterHook'])\nclass GlobalStepWaiterHook(session_run_hook.SessionRunHook):\n\n def __init__(self, wait_until_step):\n self._wait_until_step = wait_until_step\n\n def begin(self):\n self._worker_is_started = False\n self._global_step_tensor = training_util._get_or_create_global_step_read()\n if self._global_step_tensor is None:\n raise RuntimeError('Global step should be created to use _GlobalStepWaiterHook.')\n\n def before_run(self, run_context):\n if self._worker_is_started:\n return None\n if self._wait_until_step <= 0:\n self._worker_is_started = True\n return None\n logging.info('Waiting for global step %d before starting training.', self._wait_until_step)\n last_logged_step = 0\n while True:\n current_step = run_context.session.run(self._global_step_tensor)\n if current_step >= self._wait_until_step:\n self._worker_is_started = True\n return None\n if current_step - last_logged_step > 1000:\n logging.info('Waiting for global step %d before starting training. Current step is %d.', self._wait_until_step, current_step)\n last_logged_step = current_step\n time.sleep(0.5)", + "docstring": "Delays execution until global step reaches . This hook delays execution until global step reaches to . It is used to gradually start workers in distributed settings. 
One example usage would be setting assuming that task_id=0 is the chief.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", + "ast_data": "ClassDef name:GlobalStepWaiterHook FunctionDef name:__init__ arg:self arg:wait_until_step arguments arg arg Assign FunctionDef name:begin arg:self arguments arg Assign Assign Call If Compare Raise Call FunctionDef name:before_run arg:self arg:run_context arguments arg arg If Return return:no If Compare Assign Return return:no Call Assign While Assign Call If Compare Assign Return return:no If Compare Call Assign Call Call" + }, + { + "library": "matplotlib", + "name": "_get_diff_root", + "source_code": "@staticmethod\ndef _get_diff_root(x, xp, fp):\n order = xp.argsort()\n return np.interp(x, xp[order], fp[order])", + "docstring": "Calculate diff root.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:_get_diff_root arg:x arg:xp arg:fp arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "parents", + "source_code": "@abc.abstractproperty\ndef parents(self):\n pass", + "docstring": "Returns a list of immediate raw feature and FeatureColumn dependencies. For example: # For the following feature columns a = numeric_column('f1') c = crossed_column(a, 'f2') # The expected parents are: a.parents = ['f1'] c.parents = [a, 'f2']", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py", + "ast_data": "FunctionDef name:parents arg:self arguments arg" + }, + { + "library": "pandas", + "name": "validate_endpoints", + "source_code": "def validate_endpoints(closed: str | None) -> tuple[bool, bool]:\n left_closed = False\n right_closed = False\n if closed is None:\n left_closed = True\n right_closed = True\n elif closed == 'left':\n left_closed = True\n elif closed == 'right':\n right_closed = True\n else:\n raise ValueError(\"Closed has to be either 'left', 'right' or None\")\n return (left_closed, right_closed)", + "docstring": "Check that the argument is among [None, \"left\", \"right\"] Parameters ---------- closed : {None, \"left\", \"right\"} Returns ------- left_closed : bool right_closed : bool Raises ------ ValueError : if argument is not among valid values", + "type": "function", + "file_path": "pandas\\pandas\\util\\_validators.py", + "ast_data": "FunctionDef name:validate_endpoints arg:closed arguments arg Assign Assign If Compare Assign Assign If Compare Assign If Compare Assign Raise Call Return return:yes" + }, + { + "library": "django", + "name": "linear_units", + "source_code": "@property\ndef linear_units(self):\n return self.srs.linear_units", + "docstring": "Return the linear units.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py", + "ast_data": "FunctionDef name:linear_units arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "broadcast_shapes", + "source_code": "def broadcast_shapes(*shapes: tuple[float | None, ...]) -> tuple[int | None, ...]:\n if not shapes:\n return ()\n ndim = max((len(shape) for shape in shapes))\n out: list[int | None] = []\n for axis in range(-ndim, 0):\n sizes = {shape[axis] for shape in shapes if axis >= -len(shape)}\n none_size = None in sizes or math.nan in sizes\n sizes -= {1, None, math.nan}\n if len(sizes) > 1:\n msg = f'shape mismatch: objects cannot be broadcast to a single shape: {shapes}.'\n raise 
ValueError(msg)\n out.append(None if none_size else cast(int, sizes.pop()) if sizes else 1)\n return tuple(out)", + "docstring": "Compute the shape of the broadcasted arrays. Duplicates :func:, with additional support for None and NaN sizes. This is equivalent to `` for unknown sizes. Examples -------- >>> import array_api_extra as xpx >>> xpx.broadcast_shapes((2, 3), (2, 1)) (2, 3) >>> xpx.broadcast_shapes((4, 2, 3), (2, 1), (1, 3)) (4, 2, 3)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py", + "ast_data": "FunctionDef name:broadcast_shapes arguments arg If Return return:no Assign Call Call For Call Assign Compare Call Assign BoolOp Compare Compare If Compare Call Assign Raise Call Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "twinx", + "source_code": "def twinx(self, axes_class=None, **kwargs):\n if axes_class:\n kwargs['axes_class'] = axes_class\n ax2 = self._make_twin_axes(sharex=self, **kwargs)\n ax2.yaxis.tick_right()\n ax2.yaxis.set_label_position('right')\n ax2.yaxis.set_offset_position('right')\n ax2.set_autoscalex_on(self.get_autoscalex_on())\n self.yaxis.tick_left()\n ax2.xaxis.set_visible(False)\n ax2.patch.set_visible(False)\n ax2.xaxis.units = self.xaxis.units\n return ax2", + "docstring": "Create a twin Axes sharing the xaxis. Create a new Axes with an invisible x-axis and an independent y-axis positioned opposite to the original one (i.e. at right). The x-axis autoscale setting will be inherited from the original Axes. To ensure that the tick marks of both y-axes align, see . Parameters ---------- axes_class : subclass type of , optional The subclass that is instantiated. This parameter is incompatible with *projection* and *polar*. See :ref: for examples. By default, is used. .. versionadded:: 3.11 kwargs : dict The keyword arguments passed to or . .. versionadded:: 3.11 Returns ------- Axes The newly created Axes instance Notes ----- For those who are 'picking' artists while using twinx, pick events are only called for the artists in the top-most Axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:twinx arg:self arg:axes_class arguments arg arg arg If Assign Assign Call Call Call Call Call Call Call Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "set_checkpoint_debug_enabled", + "source_code": "@contextlib.contextmanager\ndef set_checkpoint_debug_enabled(enabled: Optional[bool]):\n global _checkpoint_debug_enabled\n try:\n prev = _checkpoint_debug_enabled\n _checkpoint_debug_enabled = enabled\n yield\n finally:\n _checkpoint_debug_enabled = prev", + "docstring": "Context manager that sets whether checkpoint should print additional debug information when running. See the `~torch.utils.checkpoint.checkpoint` to this context. Args: enabled (bool): Whether checkpoint should print debug information. 
Default is 'None'.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\checkpoint.py", + "ast_data": "FunctionDef name:set_checkpoint_debug_enabled arg:enabled arguments arg Try Assign Assign Assign" + }, + { + "library": "matplotlib", + "name": "_get_camera_loc", + "source_code": "def _get_camera_loc(self):\n cx, cy, cz, dx, dy, dz = self._get_w_centers_ranges()\n c = np.array([cx, cy, cz])\n r = np.array([dx, dy, dz])\n if self._focal_length == np.inf:\n focal_length = 1000000000.0\n else:\n focal_length = self._focal_length\n eye = c + self._view_w * self._dist * r / self._box_aspect * focal_length\n return eye", + "docstring": "Returns the current camera location in data coordinates.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:_get_camera_loc arg:self arguments arg Assign Call Assign Call Assign Call If Compare Assign Assign Assign Return return:yes" + }, + { + "library": "pandas", + "name": "DtypeWarning", + "source_code": "class DtypeWarning(Warning):\n pass", + "docstring": "Warning raised when reading different dtypes in a column from a file. Raised for a dtype incompatibility. This can happen whenever or encounter non-uniform dtypes in a column(s) of a given CSV file. See Also -------- read_csv : Read CSV (comma-separated) file into a DataFrame. read_table : Read general delimited file into a DataFrame. Notes ----- This warning is issued when dealing with larger files because the dtype checking happens per chunk read. Despite the warning, the CSV file is read with mixed types in a single column which will be an object type. See the examples below to better understand this issue. Examples -------- This example creates and reads a large CSV file with a column that contains and . >>> df = pd.DataFrame( ... { ... \"a\": ([\"1\"] * 100000 + [\"X\"] * 100000 + [\"1\"] * 100000), ... \"b\": [\"b\"] * 300000, ... } ... ) # doctest: +SKIP >>> df.to_csv(\"test.csv\", index=False) # doctest: +SKIP >>> df2 = pd.read_csv(\"test.csv\") # doctest: +SKIP ... # DtypeWarning: Columns (0: a) have mixed types Important to notice that `strintdtyperead_csvread_table` functions to explicit the conversion: >>> df2 = pd.read_csv(\"test.csv\", sep=\",\", dtype={\"a\": str}) # doctest: +SKIP No warning was issued.", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:DtypeWarning" + }, + { + "library": "scipy", + "name": "random", + "source_code": "def random(self, n: IntNumber=1, *, workers: IntNumber=1) -> np.ndarray:\n sample = self._random(n, workers=workers)\n if self.optimization_method is not None:\n sample = self.optimization_method(sample)\n self.num_generated += n\n return sample", + "docstring": "Draw in the half-open interval `Haltonn10^3`. 
Returns ------- sample : array_like (n, d) QMC sample.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:random arg:self arg:n arguments arg arg arg Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "last_call", + "source_code": "@property\ndef last_call(self) -> timedelta:\n return self._last_call", + "docstring": "Get the last call timeout.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py", + "ast_data": "FunctionDef name:last_call arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "type_check_node", + "source_code": "def type_check_node(self, n: Node):\n if n.type is None:\n n.type = Dyn\n if n.op == 'placeholder':\n return n.type\n elif n.op == 'get_attr':\n t = get_parameter(self.traced, n.target)\n if isinstance(t.data, torch.Tensor):\n n.type = TensorType(t.data.shape)\n return n.type\n elif n.op == 'call_function':\n if n.target == getattr:\n assert getattr in _INFERENCE_RULES\n return _INFERENCE_RULES[n.target](n, self.traced)\n elif n.target in _INFERENCE_RULES:\n return _INFERENCE_RULES[n.target](n)\n else:\n raise RuntimeError(f'No inference rule registered for target {n.target}!')\n elif n.op == 'call_module':\n module_instance = self.traced.get_submodule(n.target)\n if type(module_instance) in _INFERENCE_RULES:\n return _INFERENCE_RULES[type(module_instance)](n, module_instance)\n else:\n raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')\n elif n.op == 'output':\n\n def get_node_type(a):\n return a.type\n n.type = torch.fx.node.map_arg(n.args[0], get_node_type)\n return n.type\n else:\n raise NotImplementedError(f'Method {n.op} not yet implemented')", + "docstring": "Type check a given fx node. 
Current operations: - Reshape - Transpose - Add - Relu - conv2d - batchnorm2d - flatten - maxpool2d - adaptiveavgpool2d - linear", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", + "ast_data": "FunctionDef name:type_check_node arg:self arg:n arguments arg arg If Compare Assign If Compare Return return:yes If Compare Assign Call If Call Assign Call Return return:yes If Compare If Compare Compare Return return:yes Call If Compare Return return:yes Call Raise Call If Compare Assign Call If Compare Call Return return:yes Call Call Raise Call Call If Compare FunctionDef name:get_node_type arg:a arguments arg Return return:yes Assign Call Return return:yes Raise Call" + }, + { + "library": "pandas", + "name": "DataIndexableCol", + "source_code": "class DataIndexableCol(DataCol):\n is_data_indexable = True\n\n def validate_names(self) -> None:\n if not is_string_dtype(Index(self.values).dtype):\n raise ValueError('cannot have non-object label DataIndexableCol')\n\n @classmethod\n def get_atom_string(cls, shape, itemsize):\n return _tables().StringCol(itemsize=itemsize)\n\n @classmethod\n def get_atom_data(cls, shape, kind: str) -> Col:\n return cls.get_atom_coltype(kind=kind)()\n\n @classmethod\n def get_atom_datetime64(cls, shape):\n return _tables().Int64Col()\n\n @classmethod\n def get_atom_timedelta64(cls, shape):\n return _tables().Int64Col()", + "docstring": "represent a data column that can be indexed", + "type": "class", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "ClassDef name:DataIndexableCol Assign FunctionDef name:validate_names arg:self arguments arg If Call Call Raise Call FunctionDef name:get_atom_string arg:cls arg:shape arg:itemsize arguments arg arg arg Return return:yes Call Call FunctionDef name:get_atom_data arg:cls arg:shape arg:kind arguments arg arg arg Return return:yes Call Call FunctionDef name:get_atom_datetime64 arg:cls arg:shape arguments arg arg Return return:yes Call Call FunctionDef name:get_atom_timedelta64 arg:cls arg:shape arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "get_rng_state_all", + "source_code": "def get_rng_state_all() -> list[Tensor]:\n results = [get_rng_state(i) for i in range(device_count())]\n return results", + "docstring": "Return a list of ByteTensor representing the random number states of all devices.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\random.py", + "ast_data": "FunctionDef name:get_rng_state_all arguments Assign Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "centroid", + "source_code": "@lazy_cython\ndef centroid(y):\n return linkage(y, method='centroid', metric='euclidean')", + "docstring": "Perform centroid/UPGMC linkage. See for more information on the input matrix, return structure, and algorithm. The following are common calling conventions: 1. 
`linkagescipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.", + "type": "function", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:centroid arg:y arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_format_timedelta64", + "source_code": "def get_format_timedelta64(values: TimedeltaArray, nat_rep: str | float='NaT', box: bool=False) -> Callable:\n even_days = values._is_dates_only\n if even_days:\n format = None\n else:\n format = 'long'\n\n def _formatter(x):\n if x is None or (is_scalar(x) and isna(x)):\n return nat_rep\n if not isinstance(x, Timedelta):\n x = Timedelta(x)\n result = x._repr_base(format=format)\n if box:\n result = f\"'{result}'\"\n return result\n return _formatter", + "docstring": "Return a formatter function for a range of timedeltas. These will all have the same format argument If box, then show the return in quotes", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\format.py", + "ast_data": "FunctionDef name:get_format_timedelta64 arg:values arg:nat_rep arg:box arguments arg arg arg Assign If Assign Assign FunctionDef name:_formatter arg:x arguments arg If BoolOp Compare BoolOp Call Call Return return:yes If Call Assign Call Assign Call If Assign Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_WorldMeta", + "source_code": "class _WorldMeta(type):\n\n @property\n def WORLD(cls) -> Optional[ProcessGroup]:\n return _world.default_pg\n\n @WORLD.setter\n def WORLD(cls, pg: Optional[ProcessGroup]):\n _world.default_pg = pg", + "docstring": "Meta class of ``.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "ClassDef name:_WorldMeta FunctionDef name:WORLD arg:cls arguments arg Return return:yes FunctionDef name:WORLD arg:cls arg:pg arguments arg arg Assign" + }, + { + "library": "scikit-learn", + "name": "inverse_transform", + "source_code": "def inverse_transform(self, X, copy=None):\n check_is_fitted(self)\n copy = copy if copy is not None else self.copy\n X = check_array(X, accept_sparse='csr', copy=copy, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite='allow-nan')\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError('Cannot uncenter sparse matrices: pass `with_mean=False` instead See docstring for motivation and alternatives.')\n if self.scale_ is not None:\n inplace_column_scale(X, self.scale_)\n else:\n if self.with_std:\n X *= self.scale_\n if self.with_mean:\n X += self.mean_\n return X", + "docstring": "Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. copy : bool, default=None Copy the input or not. 
Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arg:copy arguments arg arg arg Call Assign Compare Assign Call If Call If Raise Call If Compare Call If If Return return:yes" + }, + { + "library": "cherrypy", + "name": "dict_from_file", + "source_code": "def dict_from_file(self, file):\n if hasattr(file, 'read'):\n self.read_file(file)\n else:\n self.read(file)\n return self.as_dict()", + "docstring": "Generate a dict from a file.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py", + "ast_data": "FunctionDef name:dict_from_file arg:self arg:file arguments arg arg If Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "display_modulewise_stats", + "source_code": "def display_modulewise_stats(self, depth: int=2) -> None:\n print('Pre-Forward Execution Order: ')\n for mod_fqn in self.mod_fw_pre_order:\n mod_depth = mod_fqn.count('.') + 1\n if mod_depth > depth:\n continue\n print(mod_fqn)\n print('Pre-Backward Execution Order: ')\n for mod_fqn in self.mod_bw_pre_order:\n mod_depth = mod_fqn.count('.') + 1\n if mod_depth > depth:\n continue\n print(mod_fqn)\n for mod_fqn, runtimes in self.mod_runtimes.items():\n mod_depth = mod_fqn.count('.') + 1\n if mod_depth > depth:\n continue\n print(f'{mod_fqn} fw: {runtimes.get('fw', 0.0):.3f}ms bw: {runtimes.get('bw', 0.0):.3f}ms')", + "docstring": "Displays module-wise statistics collected by ``. Prints the pre-forward and pre-backward execution orders. Displays the module-wise forward and backward runtimes in milliseconds. Args: depth (int): The maximum depth of module hierarchy to display (default to 2).", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\runtime_estimator.py", + "ast_data": "FunctionDef name:display_modulewise_stats arg:self arg:depth arguments arg arg Call For Assign Call If Compare Call Call For Assign Call If Compare Call For Call Assign Call If Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "add_real_datasets", + "source_code": "def add_real_datasets(self, datasets, other_datasets, cat_feature2cats, ranking=False):\n if other_datasets:\n for name, path in other_datasets:\n df_other, choices, _, _, _ = self.get_df(path, cat_feature2cats=cat_feature2cats, apply_filters=False, add_near_best=ranking)\n datasets[name] = df_other", + "docstring": "Adds datasets specified by the user to the datasets dictionary.", + "type": "method", + "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", + "ast_data": "FunctionDef name:add_real_datasets arg:self arg:datasets arg:other_datasets arg:cat_feature2cats arg:ranking arguments arg arg arg arg arg If For Assign Call Assign" + }, + { + "library": "pytorch", + "name": "get_test_and_val_size", + "source_code": "def get_test_and_val_size(self):\n return (0.15, 0.15)", + "docstring": "Returns the size of the test and validation sets.", + "type": "method", + "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", + "ast_data": "FunctionDef name:get_test_and_val_size arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "inbound_nodes", + "source_code": "@property\n@doc_controls.do_not_doc_inheritable\ndef inbound_nodes(self):\n return self._inbound_nodes", + "docstring": "Deprecated, do NOT use! 
Only for compatibility with external Keras.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:inbound_nodes arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "update_state_wrapper", + "source_code": "def update_state_wrapper(update_state_fn):\n\n def decorated(metric_obj, *args, **kwargs):\n strategy = distribute_lib.get_strategy()\n for weight in metric_obj.weights:\n if backend.is_tpu_strategy(strategy) and (not strategy.extended.variable_created_in_scope(weight)) and (not distribute_lib.in_cross_replica_context()):\n raise ValueError('Trying to run metric.update_state in replica context when the metric was not created in TPUStrategy scope. Make sure the keras Metric is created in TPUstrategy scope. ')\n with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):\n update_op = update_state_fn(*args, **kwargs)\n if update_op is not None:\n metric_obj.add_update(update_op)\n return update_op\n return tf_decorator.make_decorator(update_state_fn, decorated)", + "docstring": "Decorator to wrap metric with . Args: update_state_fn: function that accumulates metric statistics. Returns: Decorated function that wraps with .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py", + "ast_data": "FunctionDef name:update_state_wrapper arg:update_state_fn arguments arg FunctionDef name:decorated arg:metric_obj arguments arg arg arg Assign Call For If BoolOp Call Call Call Raise Call With Call Assign Call If Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "as_datetime", + "source_code": "def as_datetime(self):\n if not self.is_set:\n return None\n yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]\n status = capi.get_field_as_datetime(self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd), byref(hh), byref(mn), byref(ss), byref(tz))\n if status:\n return (yy, mm, dd, hh, mn, ss, tz)\n else:\n raise GDALException('Unable to retrieve date & time information from the field.')", + "docstring": "Retrieve the Field's value as a tuple of date & time components.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\field.py", + "ast_data": "FunctionDef name:as_datetime arg:self arguments arg If Return return:no Assign Call Call Assign Call Call Call Call Call Call Call Call If Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "generate_non_native_lazy_ir_nodes", + "source_code": "def generate_non_native_lazy_ir_nodes(non_native: list[dict[str, Any]], gen_lazy_ir: GenLazyIR) -> list[str]:\n nodes = []\n for op in non_native:\n properties = LazyIrProperties('ShapeCache', 'CanBeReused', 'LowerDeclOnly')\n for p in op.get('properties', []):\n setattr(properties, p, True)\n schema = LazyIrSchema(FunctionSchema.parse(op['func']), properties, symint=True)\n schema.opkind = op.get('opkind')\n nodes.append(gen_lazy_ir.gen(schema)[0])\n return nodes", + "docstring": "Generate the non-native lazy IR node classes", + "type": "function", + "file_path": "pytorch\\torchgen\\dest\\lazy_ir.py", + "ast_data": "FunctionDef name:generate_non_native_lazy_ir_nodes arg:non_native arg:gen_lazy_ir arguments arg arg Assign For Assign Call For Call Call Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "lint", + "source_code": "@click.command()\n@click.option('--fix', default=False, is_flag=True, help='Attempt to 
auto-fix errors')\n@click.option('--diff-against', default='main', help='Diff against this branch and lint modified files. Use either `--diff-against` or `--files`, but not both.')\n@click.option('--files', default='', help='Lint these files or directories; use **/*.py to lint all files')\n@click.option('--all', default=False, is_flag=True, help='This overrides `--diff-against` and `--files` to lint all local files (excluding subprojects).')\n@click.option('--no-cython', default=True, is_flag=True, help='Do not run cython-lint.')\n@click.pass_context\ndef lint(ctx, fix, diff_against, files, all, no_cython):\n cmd_prefix = [sys.executable] if sys.platform == 'win32' else []\n cmd_lint = cmd_prefix + [os.path.join('tools', 'lint.py'), f'--diff-against={diff_against}']\n if files != '':\n cmd_lint += [f'--files={files}']\n if all:\n cmd_lint += ['--all']\n if no_cython:\n cmd_lint += ['--no-cython']\n if fix:\n cmd_lint += ['--fix']\n util.run(cmd_lint)\n cmd_unicode = cmd_prefix + [os.path.join('tools', 'check_unicode.py')]\n util.run(cmd_unicode)\n cmd_check_test_name = cmd_prefix + [os.path.join('tools', 'check_test_name.py')]\n util.run(cmd_check_test_name)", + "docstring": "🔦 Run linter on modified files and check for disallowed Unicode characters and possibly-invalid test names.", + "type": "function", + "file_path": "scipy\\.spin\\cmds.py", + "ast_data": "FunctionDef name:lint arg:ctx arg:fix arg:diff_against arg:files arg:all arg:no_cython arguments arg arg arg arg arg arg Assign Compare Assign Call If Compare If If If Call Assign Call Call Assign Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "streaming_restore", + "source_code": "@tf_export('__internal__.tracking.streaming_restore', v1=[])\ndef streaming_restore(status, session=None):\n if context.executing_eagerly():\n return\n if session is None:\n session = get_session()\n if isinstance(status, NameBasedSaverStatus):\n raise NotImplementedError('Streaming restore not supported from name-based checkpoints when graph building. File a feature request if this limitation bothers you. As a workaround, consider either using tf.train.Checkpoint to load name-based checkpoints or enabling eager execution.')\n status.run_restore_ops(session=session)\n status._checkpoint.new_restore_ops_callback = lambda ops: session.run(ops, feed_dict=status._feed_dict)", + "docstring": "When graph building, runs restore ops as soon as they come in. Args: status: A _LoadStatus objects from an object-based saver's restore(). Streaming restore from name-based checkpoints is not currently supported. 
session: A session to run new restore ops in.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:streaming_restore arg:status arg:session arguments arg arg If Call Return return:no If Compare Assign Call If Call Raise Call Call Assign arguments arg Call Call" + }, + { + "library": "tensorflow", + "name": "_pick_scalar_condition", + "source_code": "def _pick_scalar_condition(pred, cond_true, cond_false):\n pred_ = _static_value(pred)\n if pred_ is None:\n return array_ops.where_v2(pred, cond_true, cond_false)\n return cond_true if pred_ else cond_false", + "docstring": "Convenience function which chooses the condition based on the predicate.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:_pick_scalar_condition arg:pred arg:cond_true arg:cond_false arguments arg arg arg Assign Call If Compare Return return:yes Call Return return:yes" + }, + { + "library": "pandas", + "name": "dtype_counts", + "source_code": "@property\ndef dtype_counts(self) -> Mapping[str, int]:\n return self.info.dtype_counts", + "docstring": "Mapping dtype - number of counts.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:dtype_counts arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "zeros_like_v2", + "source_code": "@dispatch.dispatch_for_types(array_ops.zeros_like_v2, StructuredTensor)\ndef zeros_like_v2(input, dtype=None, name=None, layout=None):\n if layout is not None and (not layout.is_fully_replicated()):\n raise ValueError(f'StructuredTensor only allows replicated layout. got {layout}')\n if dtype is None:\n dtype = dtypes.float32\n with ops.name_scope(name, 'zeros_like', [input]) as name:\n if not input.row_partitions:\n if input.nrows() is not None:\n return array_ops.zeros([input.nrows()], dtype, layout=layout)\n else:\n return array_ops.zeros([], dtype, layout=layout)\n last_row_partition = input.row_partitions[-1]\n result = ragged_tensor.RaggedTensor._from_nested_row_partitions(array_ops.zeros(last_row_partition.nvals(), dtype=dtype), input.row_partitions)\n return result", + "docstring": "Replace every object with a zero. Example: >>> st = StructuredTensor.from_pyval([{\"x\":[3]}, {\"x\":[4,5]}]) >>> tf.zeros_like(st) >>> st = StructuredTensor.from_pyval([[{\"x\":[3]}], [{\"x\":[4,5]}, {\"x\":[]}]]) >>> tf.zeros_like(st, dtype=tf.int32) Args: input: a structured tensor. dtype: the dtype of the resulting zeros. (default is tf.float32) name: a name for the op. layout: Optional Layout. Only supports replicated layout. 
Returns: a tensor of zeros of the same shape.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:zeros_like_v2 arg:input arg:dtype arg:name arg:layout arguments arg arg arg arg If BoolOp Compare Call Raise Call If Compare Assign With Call If If Compare Call Return return:yes Call Call Return return:yes Call Assign Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "creator_with_resource_vars", + "source_code": "def creator_with_resource_vars(next_creator, **kwargs):\n if ops.inside_function():\n if_graph_building = 'graph_building'\n else:\n if_graph_building = 'not_graph_building'\n with monitoring.MonitoredTimer(distributed_variable_creation_time_counter.get_cell(strategy.__class__.__name__, if_graph_building)):\n _require_strategy_scope_extended(self)\n kwargs['use_resource'] = True\n kwargs['distribute_strategy'] = strategy\n if isinstance(kwargs['initial_value'], trackable.CheckpointInitialValue):\n checkpoint_restore_uid = kwargs['initial_value'].checkpoint_position.restore_uid\n kwargs['initial_value'] = kwargs['initial_value'].wrapped_value\n elif isinstance(kwargs['initial_value'], trackable.CheckpointInitialValueCallable):\n checkpoint_restore_uid = kwargs['initial_value'].checkpoint_position.restore_uid\n elif isinstance(kwargs['initial_value'], functools.partial) and isinstance(kwargs['initial_value'].func, trackable.CheckpointInitialValueCallable):\n checkpoint_restore_uid = kwargs['initial_value'].func.checkpoint_position.restore_uid\n else:\n checkpoint_restore_uid = None\n created = self._create_variable(next_creator, **kwargs)\n if checkpoint_restore_uid is not None:\n created._maybe_initialize_trackable()\n created._update_uid = checkpoint_restore_uid\n return created", + "docstring": "Variable creator to use in .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:creator_with_resource_vars arg:next_creator arguments arg arg If Call Assign Assign With Call Call Call Assign Assign If Call Assign Assign If Call Assign If BoolOp Call Call Assign Assign Assign Call If Compare Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "get_matrix", + "source_code": "def get_matrix(self):\n if self.approx_type == 'hess':\n M = np.copy(self.B)\n else:\n M = np.copy(self.H)\n li = np.tril_indices_from(M, k=-1)\n M[li] = M.T[li]\n return M", + "docstring": "Return the current internal matrix. Returns ------- M : ndarray, shape (n, n) Dense matrix containing either the Hessian or its inverse (depending on how was defined).", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py", + "ast_data": "FunctionDef name:get_matrix arg:self arguments arg If Compare Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "_TransformedBboxWithCallback", + "source_code": "class _TransformedBboxWithCallback(TransformedBbox):\n\n def __init__(self, *args, callback, **kwargs):\n super().__init__(*args, **kwargs)\n self._callback = callback\n\n def get_points(self):\n self._callback()\n return super().get_points()", + "docstring": "Variant of which calls *callback* before returning points. 
Used by to unstale the parent axes' viewlim as needed.", + "type": "class", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py", + "ast_data": "ClassDef name:_TransformedBboxWithCallback FunctionDef name:__init__ arg:self arguments arg arg arg arg Call Call Assign FunctionDef name:get_points arg:self arguments arg Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "axes", + "source_code": "@property\ndef axes(self):\n return self._axes", + "docstring": "The instance the artist resides in, or *None*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:axes arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "LoadContext", + "source_code": "class LoadContext(threading.local):\n\n def __init__(self):\n super(LoadContext, self).__init__()\n self._entered_load_context = []\n self._load_options = None\n\n def set_load_options(self, load_options):\n self._load_options = load_options\n self._entered_load_context.append(True)\n\n def clear_load_options(self):\n self._load_options = None\n self._entered_load_context.pop()\n\n def load_options(self):\n return self._load_options\n\n def in_load_context(self):\n return self._entered_load_context", + "docstring": "A context for loading a model.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load_context.py", + "ast_data": "ClassDef name:LoadContext FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign FunctionDef name:set_load_options arg:self arg:load_options arguments arg arg Assign Call FunctionDef name:clear_load_options arg:self arguments arg Assign Call FunctionDef name:load_options arg:self arguments arg Return return:yes FunctionDef name:in_load_context arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "_maybe_reindex_columns_na_proxy", + "source_code": "def _maybe_reindex_columns_na_proxy(axes: list[Index], mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]], needs_copy: bool) -> list[BlockManager]:\n new_mgrs = []\n for mgr, indexers in mgrs_indexers:\n for i, indexer in indexers.items():\n mgr = mgr.reindex_indexer(axes[i], indexers[i], axis=i, only_slice=True, allow_dups=True, use_na_proxy=True)\n if needs_copy and (not indexers):\n mgr = mgr.copy()\n new_mgrs.append(mgr)\n return new_mgrs", + "docstring": "Reindex along columns so that all of the BlockManagers being concatenated have matching columns. 
Columns added in this reindexing have dtype=np.void, indicating they should be ignored when choosing a column's final dtype.", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\concat.py", + "ast_data": "FunctionDef name:_maybe_reindex_columns_na_proxy arg:axes arg:mgrs_indexers arg:needs_copy arguments arg arg arg Assign For For Call Assign Call If BoolOp Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_url", + "source_code": "def get_url(self):\n return self._url", + "docstring": "Return the url.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:get_url arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "expand", + "source_code": "@_onnx_symbolic('aten::expand')\n@symbolic_helper.quantized_args(True)\ndef expand(g: jit_utils.GraphContext, self, size, implicit):\n size = symbolic_helper._maybe_get_const(size, 'is')\n if not symbolic_helper._is_value(size):\n size = g.op('Constant', value_t=torch.LongTensor(size))\n elif symbolic_helper._is_packed_list(size):\n size = symbolic_helper._reshape_helper(g, stack(g, size, 0), g.op('Constant', value_t=torch.tensor([-1])))\n dtype = _type_utils.JitScalarType.INT64\n ones = ones_like(g, size, dtype)\n neg_ones = mul(g, ones, g.op('Constant', value_t=torch.tensor(-1)))\n size = where(g, g.op('Equal', size, neg_ones), ones, size)\n return g.op('Expand', self, size)", + "docstring": "Implement the expand function for a pytorch tensor in ONNX according to specified", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py", + "ast_data": "FunctionDef name:expand arg:g arg:self arg:size arg:implicit arguments arg arg arg arg Assign Call If Call Assign Call Call If Call Assign Call Call Call Call Assign Assign Call Assign Call Call Call Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "xw_plus_b", + "source_code": "@tf_export(v1=['nn.xw_plus_b'])\n@dispatch.add_dispatch_support\ndef xw_plus_b(x, weights, biases, name=None):\n with ops.name_scope(name, 'xw_plus_b', [x, weights, biases]) as name:\n x = ops.convert_to_tensor(x, name='x')\n weights = ops.convert_to_tensor(weights, name='weights')\n biases = ops.convert_to_tensor(biases, name='biases')\n mm = math_ops.matmul(x, weights)\n return bias_add(mm, biases, name=name)", + "docstring": "Computes matmul(x, weights) + biases. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified \"xw_plus_b\" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. 
Dimensions typically: batch, out_units.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:xw_plus_b arg:x arg:weights arg:biases arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_parse_variant_shapes_and_types", + "source_code": "def _parse_variant_shapes_and_types(t):\n shapes_and_types = _variant_handle_data(t)\n if shapes_and_types is None or not shapes_and_types:\n raise ValueError('Required handle data not set for {!r}'.format(t))\n if shapes_and_types[0].type.type_id == full_type_pb2.TFT_ARRAY:\n return shapes_and_types\n elif shapes_and_types[0].type.type_id == full_type_pb2.TFT_UNSET:\n return shapes_and_types\n else:\n raise ValueError('Attempted to stack a variant-dtype tensor with no type set ({!r})'.format(t))", + "docstring": "Extracts shape and dtype information from a variant tensor .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:_parse_variant_shapes_and_types arg:t arguments arg Assign Call If BoolOp Compare Raise Call Call If Compare Return return:yes If Compare Return return:yes Raise Call Call" + }, + { + "library": "pytorch", + "name": "user_cache_dir", + "source_code": "def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):\n if system == 'win32':\n if appauthor is None:\n appauthor = appname\n path = os.path.normpath(_get_win_folder('CSIDL_LOCAL_APPDATA'))\n if appname:\n if appauthor is not False:\n path = os.path.join(path, appauthor, appname)\n else:\n path = os.path.join(path, appname)\n if opinion:\n path = os.path.join(path, 'Cache')\n elif system == 'darwin':\n path = os.path.expanduser('~/Library/Caches')\n if appname:\n path = os.path.join(path, appname)\n else:\n path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n if appname:\n path = os.path.join(path, appname)\n if appname and version:\n path = os.path.join(path, version)\n return path", + "docstring": "Return full path to the user-specific cache dir for this application. \"appname\" is the name of application. If None, just the system directory is returned. \"appauthor\" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. \"version\" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be \".\". Only applied when appname is present. \"opinion\" (boolean) can be False to disable the appending of \"Cache\" to the base app data dir for Windows. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Caches/ Unix: ~/.cache/ (XDG default) Win XP: C:\\Documents and Settings\\\\Local Settings\\Application Data\\\\\\Cache Vista: C:\\Users\\\\AppData\\Local\\\\\\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the directory. This is identical to the non-roaming app data dir (the default returned by above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\\Mozilla\\Firefox\\Profiles\\\\Cache ...\\Acme\\SuperApp\\Cache\\1.0 OPINION: This function appends \"Cache\" to the value. 
This can be disabled with the option.", + "type": "function", + "file_path": "pytorch\\torch\\_appdirs.py", + "ast_data": "FunctionDef name:user_cache_dir arg:appname arg:appauthor arg:version arg:opinion arguments arg arg arg arg If Compare If Compare Assign Assign Call Call If If Compare Assign Call Assign Call If Assign Call If Compare Assign Call If Assign Call Assign Call Call If Assign Call If BoolOp Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "with_flat_values", + "source_code": "def with_flat_values(self, new_values):\n if isinstance(self._values, RaggedTensor):\n return self.with_values(self.values.with_flat_values(new_values))\n else:\n new_values = _convert_to_ragged_tensor_values(new_values)\n return self.with_values(new_values)", + "docstring": "Returns a copy of with replaced by . Preserves cached row-partitioning tensors such as and if they have values. Args: new_values: Potentially ragged tensor that should replace . Must have , and must have the same number of rows as . Returns: A . . .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:with_flat_values arg:self arg:new_values arguments arg arg If Call Return return:yes Call Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "__acall__", + "source_code": "async def __acall__(self, request):\n response = None\n if hasattr(self, 'process_request'):\n response = await sync_to_async(self.process_request, thread_sensitive=True)(request)\n response = response or await self.get_response(request)\n if hasattr(self, 'process_response'):\n response = await sync_to_async(self.process_response, thread_sensitive=True)(request, response)\n return response", + "docstring": "Async version of __call__ that is swapped in when an async request is running.", + "type": "method", + "file_path": "django\\django\\utils\\deprecation.py", + "ast_data": "AsyncFunctionDef name:__acall__ arg:self arg:request arguments arg arg Assign If Call Assign Call Call Assign BoolOp Call If Call Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_validate_center_shape", + "source_code": "def _validate_center_shape(self, X, centers):\n if centers.shape[0] != self.n_clusters:\n raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of clusters {self.n_clusters}.')\n if centers.shape[1] != X.shape[1]:\n raise ValueError(f'The shape of the initial centers {centers.shape} does not match the number of features of the data {X.shape[1]}.')", + "docstring": "Check if centers is compatible with X and n_clusters.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py", + "ast_data": "FunctionDef name:_validate_center_shape arg:self arg:X arg:centers arguments arg arg arg If Compare Raise Call If Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "_copy_tensors_to_device", + "source_code": "def _copy_tensors_to_device(self, partitioned_tensors: Dict[str, Any]) -> Any:\n partitioned_device_tensors = {}\n for table_name in partitioned_tensors:\n partitioned_tensor = partitioned_tensors[table_name][0]\n row_pointers_unpadded_size = partitioned_tensors[table_name][1]\n ids_unpadded_size = partitioned_tensors[table_name][2]\n row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains = xla_ops.tpu_copy_with_dynamic_shape([partitioned_tensor.row_pointers, partitioned_tensor.sorted_sample_ids, 
partitioned_tensor.sorted_token_ids, partitioned_tensor.sorted_gains], [row_pointers_unpadded_size, ids_unpadded_size, ids_unpadded_size, ids_unpadded_size])\n row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains = xla_ops.tpu_annotate_tensors_with_dynamic_shape([row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains])\n partitioned_device_tensors[table_name] = PartitionedCsrFormatTensor(row_pointers=row_pointers, sorted_sample_ids=sorted_sample_ids, sorted_token_ids=sorted_token_ids, sorted_gains=sorted_gains, sample_count=partitioned_tensor.sample_count, num_minibatches_per_physical_sparse_core=partitioned_tensor.num_minibatches_per_physical_sparse_core)\n return partitioned_device_tensors", + "docstring": "Copy tensors to device.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:_copy_tensors_to_device arg:self arg:partitioned_tensors arguments arg arg Assign For Assign Assign Assign Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "release_buffer", + "source_code": "def release_buffer(self, name):\n assert name in self.local_buffers\n return f'_{name}.release()'", + "docstring": "Codegen the code to release the ownership of a local buffer to others", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_template_kernel.py", + "ast_data": "FunctionDef name:release_buffer arg:self arg:name arguments arg arg Compare Return return:yes" + }, + { + "library": "pandas", + "name": "FloatingArray", + "source_code": "class FloatingArray(NumericArray):\n _dtype_cls = FloatingDtype", + "docstring": "Array of floating (optional missing) values. .. warning:: FloatingArray is currently experimental, and its API or internal implementation may change without warning. Especially the behaviour regarding NaN (distinct from NA missing values) is subject to change. We represent a FloatingArray with 2 numpy arrays: - data: contains a numpy float array of the appropriate dtype - mask: a boolean array holding a mask on the data, True is missing To construct an FloatingArray from generic array-like input, use :func: with one of the float dtypes (see examples). See :ref: for more. Parameters ---------- values : numpy.ndarray A 1-d float-dtype array. mask : numpy.ndarray A 1-d boolean-dtype array indicating missing values. copy : bool, default False Whether to copy the and . Attributes ---------- None Methods ------- None Returns ------- FloatingArray See Also -------- array : Create an array. Float32Dtype : Float32 dtype for FloatingArray. Float64Dtype : Float64 dtype for FloatingArray. Series : One-dimensional labeled array capable of holding data. DataFrame : Two-dimensional, size-mutable, potentially heterogeneous tabular data. Examples -------- Create an FloatingArray with :func:: >>> pd.array([0.1, None, 0.3], dtype=pd.Float32Dtype()) [0.1, , 0.3] Length: 3, dtype: Float32 String aliases for the dtypes are also available. They are capitalized. 
>>> pd.array([0.1, None, 0.3], dtype=\"Float32\") [0.1, , 0.3] Length: 3, dtype: Float32", + "type": "class", + "file_path": "pandas\\pandas\\core\\arrays\\floating.py", + "ast_data": "ClassDef name:FloatingArray Assign" + }, + { + "library": "pandas", + "name": "time_frame_period_formatting_iso8601_strftime_offset", + "source_code": "def time_frame_period_formatting_iso8601_strftime_offset(self, nobs, freq):\n self.data['p'].dt.strftime(date_format='%Y-%m-%dT%H:%M:%S%z')", + "docstring": "Not optimized yet as %z is not supported by", + "type": "method", + "file_path": "pandas\\asv_bench\\benchmarks\\strftime.py", + "ast_data": "FunctionDef name:time_frame_period_formatting_iso8601_strftime_offset arg:self arg:nobs arg:freq arguments arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "build_shuffle_all_reduce", + "source_code": "def build_shuffle_all_reduce(input_tensors, gather_devices, red_op, un_op=None):\n input_tensors, shape = _flatten_tensors(input_tensors)\n dst_devices = [t.device for t in input_tensors]\n reduced_shards = _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op)\n output_tensors = _build_shuffle_scatter(reduced_shards, dst_devices)\n if len(shape) != 1:\n output_tensors = _reshape_tensors(output_tensors, shape)\n return output_tensors", + "docstring": "Construct a subgraph for shuffle all-reduce. Shuffle reduce is essentially the algorithm implemented when using parameter servers. Suppose tensor length is n, there are d devices and g gather shards. Each device sends a n/g length sub-tensor to each gather shard. The gather shards perform a reduction across d fragments, then broadcast the result back to each device. The devices then join the g fully reduced fragments they receive from the shards. The gather shards could perform d-1 pairwise reductions, or one d-way reduction. The first is better where reduction Op time is low compared to transmission time, the second better in the other case. Args: input_tensors: list of values to be reduced. gather_devices: list of names of devices on which reduction shards should be placed. red_op: an n-array elementwise reduction Op un_op: optional elementwise unary Op to be applied to fully-reduced values. Returns: list of which are the fully reduced tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:build_shuffle_all_reduce arg:input_tensors arg:gather_devices arg:red_op arg:un_op arguments arg arg arg arg Assign Call Assign Assign Call Assign Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y, sample_weight=None):\n from .metrics import r2_score\n y_pred = self.predict(X)\n return r2_score(y, y_pred, sample_weight=sample_weight)", + "docstring": "Return :ref: on test data. The coefficient of determination, :math:, is defined as :math:, where :math: is the residual sum of squares `vyR^2XR^2yR^2~sklearn.metrics.r2_score~sklearn.multioutput.MultiOutputRegressor`).", + "type": "method", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). 
You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='Normal'):\n parameters = dict(locals())\n with ops.name_scope(name, values=[loc, scale]) as name:\n with ops.control_dependencies([check_ops.assert_positive(scale)] if validate_args else []):\n self._loc = array_ops.identity(loc, name='loc')\n self._scale = array_ops.identity(scale, name='scale')\n check_ops.assert_same_float_dtype([self._loc, self._scale])\n super(Normal, self).__init__(dtype=self._scale.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._loc, self._scale], name=name)", + "docstring": "Construct Normal distributions with mean and stddev and . The parameters and must be shaped in a way that supports broadcasting (e.g. is a valid operation). Args: loc: Floating point tensor; the means of the distribution(s). scale: Floating point tensor; the stddevs of the distribution(s). Must contain only positive values. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. Raises: TypeError: if and have different .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\normal.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:loc arg:scale arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call With Call Call Assign Call Assign Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_use_tensor_values_cache", + "source_code": "def _use_tensor_values_cache(self):\n return self._parameters.use_compact_trace", + "docstring": "Returns True if immediate tensors should be first saved to a cache.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_use_tensor_values_cache arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "validate_initiate_login_uri", + "source_code": "def validate_initiate_login_uri(self):\n self._validate_uri('initiate_login_uri')", + "docstring": "RI using the https scheme that a third party can use to initiate a login by the RP, as specified in Section 4 of OpenID Connect Core 1.0 [OpenID.Core]. The URI MUST accept requests via both GET and POST. The Client MUST understand the login_hint and iss parameters and SHOULD support the target_link_uri parameter.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", + "ast_data": "FunctionDef name:validate_initiate_login_uri arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "sanitize_for_s3", + "source_code": "def sanitize_for_s3(text: str) -> str:\n return re.sub('[^a-zA-Z0-9_-]', '_', text)", + "docstring": "S3 keys can only contain alphanumeric characters, underscores, and dashes. 
This function replaces all other characters with underscores.", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\file_io_utils.py", + "ast_data": "FunctionDef name:sanitize_for_s3 arg:text arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_to_variant", + "source_code": "def _to_variant(self, batched_input=False, name=None):\n with ops.name_scope(name, 'RaggedToVariant', [self, batched_input]):\n return gen_ragged_conversion_ops.ragged_tensor_to_variant(self.nested_row_splits, self.flat_values, batched_input, name)", + "docstring": "Converts this into a Tensor. If is , then the is unbatched along the zero-th dimension, each component is encoded into a scalar Tensor, and these are stacked to return a 1-D Tensor. If is , then the is encoded as is and a scalar Tensor is returned. Example: >>> rt = tf.ragged.constant([[[0]], [[1]], [[2]]]) >>> rt._to_variant().shape.as_list() [] >>> rt._to_variant(batched_input=True).shape.as_list() [3] Args: batched_input: If , the is unbatched and converted to a vector. Set to by default. name: A name prefix for the returned tensors (optional). Returns: A Tensor that encodes this .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:_to_variant arg:self arg:batched_input arg:name arguments arg arg arg With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "is_control_input", + "source_code": "def is_control_input(name: str) -> str:\n return name and name[0] == '^'", + "docstring": "Returns whether or not the input is a control input.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:is_control_input arg:name arguments arg Return return:yes BoolOp Compare" + }, + { + "library": "pandas", + "name": "_adjust_binner_for_upsample", + "source_code": "def _adjust_binner_for_upsample(self, binner):\n if self.closed == 'right':\n binner = binner[1:]\n else:\n binner = binner[:-1]\n return binner", + "docstring": "Adjust our binner when upsampling. 
The range of a new index should not be outside specified range", + "type": "method", + "file_path": "pandas\\pandas\\core\\resample.py", + "ast_data": "FunctionDef name:_adjust_binner_for_upsample arg:self arg:binner arguments arg arg If Compare Assign Assign Return return:yes" + }, + { + "library": "kornia", + "name": "compute_area", + "source_code": "def compute_area(self) -> torch.Tensor:\n coords = self._data.view((-1, 4, 2)) if self._data.ndim == 4 else self._data\n centroid = coords.mean(dim=1, keepdim=True)\n angles = torch.atan2(coords[..., 1] - centroid[..., 1], coords[..., 0] - centroid[..., 0])\n _, clockwise_indices = torch.sort(angles, dim=1, descending=True)\n ordered_corners = torch.gather(coords, 1, clockwise_indices.unsqueeze(-1).expand(-1, -1, 2))\n x, y = (ordered_corners[..., 0], ordered_corners[..., 1])\n area = 0.5 * torch.abs(torch.sum(x * torch.roll(y, 1, 1) - y * torch.roll(x, 1, 1), dim=1))\n return area.view(self._data.shape[:2]) if self._data.ndim == 4 else area", + "docstring": "Return :math:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\boxes.py", + "ast_data": "FunctionDef name:compute_area arg:self arguments arg Assign Compare Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Assign Call Call Call Call Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, *args, **kwargs):\n pass", + "docstring": "Executes this callable. This behaves like a regular op - in eager mode, it immediately starts execution, returning results. In graph mode, it creates ops which return symbolic TensorFlow values (like , , etc.). For example, callables typically generate a op, but not always - the exact operations being generated are an internal implementation detail. 
Args: *args: positional argument for this call **kwargs: keyword arguments for this call Returns: The execution results.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\types\\core.py", + "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg" + }, + { + "library": "authlib", + "name": "resolve_public_key", + "source_code": "def resolve_public_key(self, request):\n raise NotImplementedError()", + "docstring": "Resolve a public key for decoding ``, developers MUST implement this method in subclass:: def resolve_public_key(self, request): return get_public_key_from_user(request.credential) :return: JWK or Key string", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7591\\endpoint.py", + "ast_data": "FunctionDef name:resolve_public_key arg:self arg:request arguments arg arg Raise Call" + }, + { + "library": "numpy", + "name": "_update_other_results", + "source_code": "def _update_other_results(results, best):\n best_con = best[1]\n bx, by = best_con\n mod_results = []\n for cost, (x, y), con_sets in results:\n if x in best_con or y in best_con:\n continue\n del con_sets[by - int(by > x) - int(by > y)]\n del con_sets[bx - int(bx > x) - int(bx > y)]\n con_sets.insert(-1, best[2][-1])\n mod_con = (x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by))\n mod_results.append((cost, mod_con, con_sets))\n return mod_results", + "docstring": "Update the positions and provisional input_sets of `` contraction.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\einsumfunc.py", + "ast_data": "FunctionDef name:_update_other_results arg:results arg:best arguments arg arg Assign Assign Assign For If BoolOp Compare Compare Call Compare Call Compare Call Compare Call Compare Call Assign Call Compare Call Compare Call Compare Call Compare Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "flush_events", + "source_code": "def flush_events(self):\n pass", + "docstring": "Flush the GUI events for the figure. Interactive backends need to reimplement this method.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:flush_events arg:self arguments arg" + }, + { + "library": "kornia", + "name": "confidence_threshold", + "source_code": "def confidence_threshold(self, layer_index: int) -> float:\n threshold = 0.8 + 0.1 * math.exp(-4.0 * layer_index / self.conf.n_layers)\n return min(max(threshold, 0), 1)", + "docstring": "Scaled confidence threshold.", + "type": "method", + "file_path": "kornia\\kornia\\feature\\lightglue.py", + "ast_data": "FunctionDef name:confidence_threshold arg:self arg:layer_index arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "restore_from_patches", + "source_code": "def restore_from_patches(self, patches: Tensor, grid_size: Tuple[int, int]=(4, 4), pad: Optional[Tuple[int, int, int, int]]=None) -> Tensor:\n if grid_size is None:\n grid_size = self.grid_size\n patches_tensor = patches.view(-1, grid_size[0], grid_size[1], *patches.shape[-3:])\n restored_tensor = concatenate(torch.chunk(patches_tensor, grid_size[0], 1), -2).squeeze(1)\n restored_tensor = concatenate(torch.chunk(restored_tensor, grid_size[1], 1), -1).squeeze(1)\n if pad is not None:\n restored_tensor = fpad(restored_tensor, [-i for i in pad])\n return restored_tensor", + "docstring": "Restore input from patches. 
Example: >>> import kornia.augmentation as K >>> pas = PatchSequential(K.ColorJiggle(0.1, 0.1, 0.1, 0.1, p=1.0), patchwise_apply=False) >>> out = pas.extract_patches(torch.arange(16).view(1, 1, 4, 4), grid_size=(2, 2)) >>> pas.restore_from_patches(out, grid_size=(2, 2)) tensor([[[[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]]])", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\patch.py", + "ast_data": "FunctionDef name:restore_from_patches arg:self arg:patches arg:grid_size arg:pad arguments arg arg arg arg If Compare Assign Assign Call Assign Call Call Call Assign Call Call Call If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ListSnapshotChunksDataset", + "source_code": "class _ListSnapshotChunksDataset(dataset_ops.DatasetSource):\n\n def __init__(self, snapshot_path: str):\n self._snapshot_path = snapshot_path\n variant_tensor = ged_ops.list_snapshot_chunks_dataset(snapshot_path, **self._flat_structure)\n super().__init__(variant_tensor)\n\n @property\n def element_spec(self) -> tensor_spec.TensorSpec:\n return tensor_spec.TensorSpec([], dtypes.string)", + "docstring": "A dataset for listing snapshot chunk files. It supports listing partially written snapshots. When a snapshot is being written, it returns the currently available chunk files.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\load_op.py", + "ast_data": "ClassDef name:_ListSnapshotChunksDataset FunctionDef name:__init__ arg:self arg:snapshot_path arguments arg arg Assign Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_get_aligned_offsets", + "source_code": "def _get_aligned_offsets(yspans, height, align='baseline'):\n _api.check_in_list(['baseline', 'left', 'top', 'right', 'bottom', 'center'], align=align)\n if height is None:\n height = max((y1 - y0 for y0, y1 in yspans))\n if align == 'baseline':\n yspan = (min((y0 for y0, y1 in yspans)), max((y1 for y0, y1 in yspans)))\n offsets = [0] * len(yspans)\n elif align in ['left', 'bottom']:\n yspan = (0, height)\n offsets = [-y0 for y0, y1 in yspans]\n elif align in ['right', 'top']:\n yspan = (0, height)\n offsets = [height - y1 for y0, y1 in yspans]\n elif align == 'center':\n yspan = (0, height)\n offsets = [(height - (y1 - y0)) * 0.5 - y0 for y0, y1 in yspans]\n return (yspan, offsets)", + "docstring": "Align boxes each specified by their `` is used without checking that it is actually large enough). descent The descent of the packing. offsets The bottom offsets of the boxes.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:_get_aligned_offsets arg:yspans arg:height arg:align arguments arg arg arg Call If Compare Assign Call If Compare Assign Call Call Assign Call If Compare Assign Assign If Compare Assign Assign If Compare Assign Assign Return return:yes" + }, + { + "library": "scipy", + "name": "run_monitored_wait4", + "source_code": "def run_monitored_wait4(code):\n code = textwrap.dedent(code)\n start = time.time()\n process = subprocess.Popen([sys.executable, '-c', code])\n pid, returncode, rusage = os.wait4(process.pid, 0)\n duration = time.time() - start\n max_rss_bytes = get_max_rss_bytes(rusage)\n if returncode != 0:\n raise AssertionError(f'Running failed:\\n{code}')\n return (duration, max_rss_bytes)", + "docstring": "Run code in a new Python process, and monitor peak memory usage. 
Returns ------- duration : float Duration in seconds (including Python startup time) peak_memusage : int Peak memory usage in bytes of the child Python process Notes ----- Works on Unix platforms (Linux, macOS) that have .", + "type": "function", + "file_path": "scipy\\benchmarks\\benchmarks\\common.py", + "ast_data": "FunctionDef name:run_monitored_wait4 arg:code arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_parse_args", + "source_code": "def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split('=')\n result[k] = v\n return result", + "docstring": "Parses arguments with the form KEY=VALUE into a dictionary.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\third_party\\llvm_openmp\\expand_cmake_vars.py", + "ast_data": "FunctionDef name:_parse_args arg:argv arguments arg Assign For Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_is_non_negative_check", + "source_code": "def _is_non_negative_check(cond: sympy.Basic) -> Optional[str]:\n if isinstance(cond, sympy.Rel):\n if cond.rel_op == '>=' and cond.rhs == 0:\n return str(cond.lhs)\n return None", + "docstring": "Check if a condition (SymPy expression) is checking for non-negative values (>= 0). Returns the variable name if it's a non-negative check (>= 0), None otherwise.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:_is_non_negative_check arg:cond arguments arg If Call If BoolOp Compare Compare Return return:yes Call Return return:no" + }, + { + "library": "pandas", + "name": "ExtensionIndex", + "source_code": "class ExtensionIndex(Index):\n _data: IntervalArray | NDArrayBackedExtensionArray\n\n def _validate_fill_value(self, value):\n return self._data._validate_setitem_value(value)\n\n @cache_readonly\n def _isnan(self) -> npt.NDArray[np.bool_]:\n return self._data.isna()", + "docstring": "Index subclass for indexes backed by ExtensionArray.", + "type": "class", + "file_path": "pandas\\pandas\\core\\indexes\\extension.py", + "ast_data": "ClassDef name:ExtensionIndex FunctionDef name:_validate_fill_value arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:_isnan arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_delete_tmp_write_dir", + "source_code": "def _delete_tmp_write_dir(self):\n distributed_file_utils.remove_temp_dirpath(self.log_dir, self.model.distribute_strategy)", + "docstring": "Deletes tmp write directories for multi-worker.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:_delete_tmp_write_dir arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, log_interval: float=5, data_collect_interval: float=1, is_debug_mode: bool=False, pynvml_enabled: bool=False, amdsmi_enabled: bool=False) -> None:\n self._log_interval = log_interval\n self._data_collect_interval = data_collect_interval\n self._metadata = UtilizationMetadata(level='metadata', usage_collect_interval=self._data_collect_interval, data_model_version=getDataModelVersion(), job_id=_job_id, job_name=_job_name, workflow_id=_workflow_run_id, workflow_name=_workflow_name, start_at=getTsNow())\n self._has_pynvml = pynvml_enabled\n self._has_amdsmi = amdsmi_enabled\n self._gpu_handles: 
list[Any] = []\n self._gpu_lib_detected: str = ''\n self._num_of_cpus = 0\n self._debug_mode = is_debug_mode\n self._initial_gpu_handler()\n self.shared_resource = SharedResource()\n self.exit_event = threading.Event()", + "docstring": "log_interval: Time interval in seconds for collecting usage data; default is 5 seconds. is_debug_mode: Useful if you're testing on a local machine and want to see the output in a pretty format with more information.", + "type": "method", + "file_path": "pytorch\\tools\\stats\\monitor.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:log_interval arg:data_collect_interval arg:is_debug_mode arg:pynvml_enabled arg:amdsmi_enabled arguments arg arg arg arg arg arg Assign Assign Assign Call Call Call Assign Assign Assign Assign Call Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "_dump_csv", + "source_code": "def _dump_csv(self, filename):\n with open(filename, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n for rank in self.pipeline_order:\n writer.writerow(self.pipeline_order[rank])", + "docstring": "Dump a CSV representation of the schedule into a file with the provided filename.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py", + "ast_data": "FunctionDef name:_dump_csv arg:self arg:filename arguments arg arg With Call Assign Call For Call" + }, + { + "library": "numpy", + "name": "_hist_bin_stone", + "source_code": "def _hist_bin_stone(x, range):\n n = x.size\n ptp_x = _ptp(x)\n if n <= 1 or ptp_x == 0:\n return 0\n\n def jhat(nbins):\n hh = ptp_x / nbins\n p_k = np.histogram(x, bins=nbins, range=range)[0] / n\n return (2 - (n + 1) * p_k.dot(p_k)) / hh\n nbins_upper_bound = max(100, int(np.sqrt(n)))\n nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)\n if nbins == nbins_upper_bound:\n warnings.warn('The number of bins estimated may be suboptimal.', RuntimeWarning, stacklevel=3)\n return ptp_x / nbins", + "docstring": "Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution. The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule. This paper by Stone appears to be the origination of this rule. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. range : (float, float) The lower and upper range of the bins. Returns ------- h : An estimate of the optimal bin width for the given data.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_histograms_impl.py", + "ast_data": "FunctionDef name:_hist_bin_stone arg:x arg:range arguments arg arg Assign Assign Call If BoolOp Compare Compare Return return:yes FunctionDef name:jhat arg:nbins arguments arg Assign Assign Call Return return:yes Call Assign Call Call Call Assign Call Call If Compare Call Return return:yes" + }, + { + "library": "pandas", + "name": "parallel_coordinates", + "source_code": "def parallel_coordinates(frame: DataFrame, class_column: str, cols: list[str] | None=None, ax: Axes | None=None, color: list[str] | tuple[str, ...] 
| None=None, use_columns: bool=False, xticks: list | tuple | None=None, colormap: Colormap | str | None=None, axvlines: bool=True, axvlines_kwds: Mapping[str, Any] | None=None, sort_labels: bool=False, **kwargs) -> Axes:\n plot_backend = _get_plot_backend('matplotlib')\n return plot_backend.parallel_coordinates(frame=frame, class_column=class_column, cols=cols, ax=ax, color=color, use_columns=use_columns, xticks=xticks, colormap=colormap, axvlines=axvlines, axvlines_kwds=axvlines_kwds, sort_labels=sort_labels, **kwargs)", + "docstring": "Parallel coordinates plotting. Parameters ---------- frame : DataFrame The DataFrame to be plotted. class_column : str Column name containing class names. cols : list, optional A list of column names to use. ax : matplotlib.axis, optional Matplotlib axis object. color : list or tuple, optional Colors to use for the different classes. use_columns : bool, optional If true, columns will be used as xticks. xticks : list or tuple, optional A list of values to use for xticks. colormap : str or matplotlib colormap, default None Colormap to use for line colors. axvlines : bool, optional If true, vertical lines will be added at each xtick. axvlines_kwds : keywords, optional Options to be passed to axvline method for vertical lines. sort_labels : bool, default False Sort class_column labels, useful when assigning colors. **kwargs Options to pass to matplotlib plotting method. Returns ------- matplotlib.axes.Axes The matplotlib axes containing the parallel coordinates plot. See Also -------- plotting.andrews_curves : Generate a matplotlib plot for visualizing clusters of multivariate data. plotting.radviz : Plot a multidimensional dataset in 2D. Examples -------- .. plot:: :context: close-figs >>> df = pd.read_csv( ... \" ... \"pandas/main/pandas/tests/io/data/csv/iris.csv\" ... ) # doctest: +SKIP >>> pd.plotting.parallel_coordinates( ... df, \"Name\", color=(\"#556270\", \"#4ECDC4\", \"#C7F464\") ... ) # doctest: +SKIP", + "type": "function", + "file_path": "pandas\\pandas\\plotting\\_misc.py", + "ast_data": "FunctionDef name:parallel_coordinates arg:frame arg:class_column arg:cols arg:ax arg:color arg:use_columns arg:xticks arg:colormap arg:axvlines arg:axvlines_kwds arg:sort_labels arguments arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "mount", + "source_code": "def mount(self, root, script_name='', config=None):\n if script_name is None:\n raise TypeError(\"The 'script_name' argument may not be None. Application objects may, however, possess a script_name of None (in order to inpect the WSGI environ for SCRIPT_NAME upon each request). You cannot mount such Applications on this Tree; you must pass them to a WSGI server interface directly.\")\n script_name = script_name.rstrip('/')\n if isinstance(root, Application):\n app = root\n if script_name != '' and script_name != app.script_name:\n raise ValueError('Cannot specify a different script name and pass an Application instance to cherrypy.mount')\n script_name = app.script_name\n else:\n app = Application(root, script_name)\n needs_favicon = script_name == '' and root is not None and (not hasattr(root, 'favicon_ico'))\n if needs_favicon:\n favicon = os.path.join(os.getcwd(), os.path.dirname(__file__), 'favicon.ico')\n root.favicon_ico = tools.staticfile.handler(favicon)\n if config:\n app.merge(config)\n self.apps[script_name] = app\n return app", + "docstring": "Mount a new app from a root object, script_name, and config. 
root An instance of a \"controller class\" (a collection of page handler methods) which represents the root of the application. This may also be an Application instance, or None if using a dispatcher other than the default. script_name A string containing the \"mount point\" of the application. This should start with a slash, and be the path portion of the URL at which to mount the given root. For example, if root.index() will handle requests to \" then the script_name argument would be \"/dept/app1\". It MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not \"/\"). config A file or dict containing application config.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptree.py", + "ast_data": "FunctionDef name:mount arg:self arg:root arg:script_name arg:config arguments arg arg arg arg If Compare Raise Call Assign Call If Call Assign If BoolOp Compare Compare Raise Call Assign Assign Call Assign BoolOp Compare Compare Call If Assign Call Call Call Assign Call If Call Assign Return return:yes" + }, + { + "library": "numpy", + "name": "_izip_fields_flat", + "source_code": "def _izip_fields_flat(iterable):\n for element in iterable:\n if isinstance(element, np.void):\n yield from _izip_fields_flat(tuple(element))\n else:\n yield element", + "docstring": "Returns an iterator of concatenated fields from a sequence of arrays, collapsing any nested structure.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\recfunctions.py", + "ast_data": "FunctionDef name:_izip_fields_flat arg:iterable arguments arg For If Call Call Call" + }, + { + "library": "sphinx", + "name": "DownloadFileCollector", + "source_code": "class DownloadFileCollector(EnvironmentCollector):\n\n def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:\n env.dlfiles.purge_doc(docname)\n\n def merge_other(self, app: Sphinx, env: BuildEnvironment, docnames: Set[str], other: BuildEnvironment) -> None:\n env.dlfiles.merge_other(docnames, other.dlfiles)\n\n def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:\n for node in doctree.findall(addnodes.download_reference):\n targetname = node['reftarget']\n if '://' in targetname:\n node['refuri'] = targetname\n else:\n rel_filename, filename = app.env.relfn2path(targetname, app.env.docname)\n app.env.note_dependency(rel_filename)\n if not os.access(filename, os.R_OK):\n logger.warning(__('download file not readable: %s'), filename, location=node, type='download', subtype='not_readable')\n continue\n node['filename'] = app.env.dlfiles.add_file(app.env.docname, rel_filename).as_posix()", + "docstring": "Download files collector for sphinx.environment.", + "type": "class", + "file_path": "sphinx\\sphinx\\environment\\collectors\\asset.py", + "ast_data": "ClassDef name:DownloadFileCollector FunctionDef name:clear_doc arg:self arg:app arg:env arg:docname arguments arg arg arg arg Call FunctionDef name:merge_other arg:self arg:app arg:env arg:docnames arg:other arguments arg arg arg arg arg Call FunctionDef name:process_doc arg:self arg:app arg:doctree arguments arg arg arg For Call Assign If Compare Assign Assign Call Call If Call Call Call Assign Call Call" + }, + { + "library": "numpy", + "name": "_write_array_header", + "source_code": "def _write_array_header(fp, d, version=None):\n header = ['{']\n for key, value in sorted(d.items()):\n header.append(f\"'{key}': {repr(value)}, \")\n header.append('}')\n header = ''.join(header)\n shape = d['shape']\n header += ' ' * 
(GROWTH_AXIS_MAX_DIGITS - len(repr(shape[-1 if d['fortran_order'] else 0])) if len(shape) > 0 else 0)\n if version is None:\n header = _wrap_header_guess_version(header)\n else:\n header = _wrap_header(header, version)\n fp.write(header)", + "docstring": "Write the header for an array and returns the version used Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. version : tuple or None None means use oldest that works. Providing an explicit version will raise a ValueError if the format does not allow saving this data. Default: None", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_format_impl.py", + "ast_data": "FunctionDef name:_write_array_header arg:fp arg:d arg:version arguments arg arg arg Assign For Call Call Call Call Call Assign Call Assign Compare Call Call Call If Compare Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "copy_handle_data", + "source_code": "def copy_handle_data(source_t, target_t):\n if target_t.dtype == dtypes.resource or target_t.dtype == dtypes.variant:\n handle_data = get_handle_data(source_t)\n set_handle_data(target_t, handle_data)", + "docstring": "Copies HandleData for variant and resource type tensors if available. The CppShapeInferenceResult::HandleData proto contains information about the shapes and types of the element tensors of resource/variant type tensors. We need to copy this across function boundaries, i.e., when capturing a placeholder or when returning a function tensor as output. If we don't do this the element tensors will have unknown shapes, e.g., if a TensorList variant tensor is captured as a placeholder, elements popped from that list would have unknown shape. Args: source_t: The tensor to copy HandleData from. target_t: The tensor to copy HandleData to.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\handle_data_util.py", + "ast_data": "FunctionDef name:copy_handle_data arg:source_t arg:target_t arguments arg arg If BoolOp Compare Compare Assign Call Call" + }, + { + "library": "scipy", + "name": "mahalanobis", + "source_code": "def mahalanobis(u, v, VI):\n u = _validate_vector(u)\n v = _validate_vector(v)\n VI = np.atleast_2d(VI)\n delta = u - v\n m = np.dot(np.dot(delta, VI), delta)\n return np.sqrt(m)", + "docstring": "Compute the Mahalanobis distance between two 1-D arrays. The Mahalanobis distance between 1-D arrays and , is defined as .. math:: \\sqrt{ (u-v) V^{-1} (u-v)^T } where `VIuv`. 
Examples -------- >>> from scipy.spatial import distance >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]] >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv) 1.0 >>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv) 1.0 >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv) 1.7320508075688772", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\distance.py", + "ast_data": "FunctionDef name:mahalanobis arg:u arg:v arg:VI arguments arg arg arg Assign Call Assign Call Assign Call Assign Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "AsyncCheckpointerType", + "source_code": "class AsyncCheckpointerType(Enum):\n THREAD = 'thread'\n PROCESS = 'process'", + "docstring": "Enum for async checkpointer type.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict_saver.py", + "ast_data": "ClassDef name:AsyncCheckpointerType Assign Assign" + }, + { + "library": "tensorflow", + "name": "__iter__", + "source_code": "def __iter__(self):\n for item in (self[i] for i in range(len(self))):\n yield item", + "docstring": "Create a generator that iterate over the Sequence.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", + "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call Call" + }, + { + "library": "tensorflow", + "name": "cast_if_floating_dtype", + "source_code": "def cast_if_floating_dtype(x, dtype=None):\n return nest.map_structure(functools.partial(cast_single_tensor, dtype=dtype), x)", + "docstring": "Casts the given data tensors to the default floating point type. Casts only if the input is already a floating point type. Args: x: tensor or list/tuple of tensors. dtype: The dtype to which Tensors should be cast. Returns: Converted input.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:cast_if_floating_dtype arg:x arg:dtype arguments arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "deprecate_cython_api", + "source_code": "def deprecate_cython_api(module, routine_name, new_name=None, message=None):\n old_name = f'{module.__name__}.{routine_name}'\n if new_name is None:\n depdoc = f'`{old_name}` is deprecated!'\n else:\n depdoc = f'`{old_name}` is deprecated, use `{new_name}` instead!'\n if message is not None:\n depdoc += '\\n' + message\n d = module.__pyx_capi__\n j = 0\n has_fused = False\n while True:\n fused_name = f'__pyx_fuse_{j}{routine_name}'\n if fused_name in d:\n has_fused = True\n d[_DeprecationHelperStr(fused_name, depdoc)] = d.pop(fused_name)\n j += 1\n else:\n break\n if not has_fused:\n d[_DeprecationHelperStr(routine_name, depdoc)] = d.pop(routine_name)", + "docstring": "Deprecate an exported cdef function in a public Cython API module. Only functions can be deprecated; typedefs etc. cannot. Parameters ---------- module : module Public Cython API module (e.g. scipy.linalg.cython_blas). routine_name : str Name of the routine to deprecate. May also be a fused-type routine (in which case its all specializations are deprecated). new_name : str New name to include in the deprecation warning message message : str Additional text in the deprecation warning message Examples -------- Usually, this function would be used in the top-level of the module `` file: >>> from scipy._lib.deprecation import deprecate_cython_api >>> import scipy.linalg.cython_blas as mod >>> deprecate_cython_api(mod, \"dgemm\", \"dgemm_new\", ... 
message=\"Deprecated in Scipy 1.5.0\") >>> del deprecate_cython_api, mod After this, Cython modules that use the deprecated function emit a deprecation warning when they are imported.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\deprecation.py", + "ast_data": "FunctionDef name:deprecate_cython_api arg:module arg:routine_name arg:new_name arg:message arguments arg arg arg arg Assign If Compare Assign Assign If Compare Assign Assign Assign While Assign If Compare Assign Assign Call Call If Assign Call Call" + }, + { + "library": "tensorflow", + "name": "std", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef std(x, axis=None, keepdims=False):\n if x.dtype.base_dtype == dtypes_module.bool:\n x = math_ops.cast(x, floatx())\n return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)", + "docstring": "Standard deviation of a tensor, alongside the specified axis. It is an alias to . Args: x: A tensor or variable. It should have numerical dtypes. Boolean type inputs will be converted to float. axis: An integer, the axis to compute the standard deviation. If (the default), reduces all dimensions. Must be in the range . keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. Returns: A tensor with the standard deviation of elements of with same dtype. Boolean type input will be converted to float.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:std arg:x arg:axis arg:keepdims arguments arg arg arg If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "RubberbandBase", + "source_code": "class RubberbandBase(ToolBase):\n\n def trigger(self, sender, event, data=None):\n if not self.figure.canvas.widgetlock.available(sender):\n return\n if data is not None:\n self.draw_rubberband(*data)\n else:\n self.remove_rubberband()\n\n def draw_rubberband(self, *data):\n raise NotImplementedError\n\n def remove_rubberband(self):\n pass", + "docstring": "Draw and remove a rubberband.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "ClassDef name:RubberbandBase FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg If Call Return return:no If Compare Call Call FunctionDef name:draw_rubberband arg:self arguments arg arg Raise FunctionDef name:remove_rubberband arg:self arguments arg" + }, + { + "library": "kornia", + "name": "InRange", + "source_code": "class InRange(Module):\n\n def __init__(self, lower: Union[tuple[Any, ...], Tensor], upper: Union[tuple[Any, ...], Tensor], return_mask: bool=False) -> None:\n super().__init__()\n self.lower = lower\n self.upper = upper\n self.return_mask = return_mask\n\n def forward(self, input: Tensor) -> Tensor:\n return in_range(input, self.lower, self.upper, self.return_mask)", + "docstring": "Create a module for applying lower and upper bounds to input tensors. Args: input: The input tensor to be filtered. lower: The lower bounds of the filter (inclusive). upper: The upper bounds of the filter (inclusive). return_mask: If is true, the filtered mask is returned, otherwise the filtered input image. Returns: A binary mask :math: of input indicating whether elements are within the range or filtered input image :math:. .. note:: View complete documentation in :func:. 
Examples: >>> rng = torch.manual_seed(1) >>> input = torch.rand(1, 3, 3, 3) >>> lower = (0.2, 0.3, 0.4) >>> upper = (0.8, 0.9, 1.0) >>> mask = InRange(lower, upper, return_mask=True)(input) >>> mask tensor([[[[1., 1., 0.], [0., 0., 0.], [0., 1., 1.]]]])", + "type": "class", + "file_path": "kornia\\kornia\\filters\\in_range.py", + "ast_data": "ClassDef name:InRange FunctionDef name:__init__ arg:self arg:lower arg:upper arg:return_mask arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "with_empty_output", + "source_code": "def with_empty_output(self):\n self._options['output'] = 'none'\n return self", + "docstring": "Do not generate side-effect outputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py", + "ast_data": "FunctionDef name:with_empty_output arg:self arguments arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "select_data_adapter", + "source_code": "def select_data_adapter(x, y):\n adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)]\n if not adapter_cls:\n raise ValueError('Failed to find data adapter that can handle input: {}, {}'.format(_type_name(x), _type_name(y)))\n elif len(adapter_cls) > 1:\n raise RuntimeError('Data adapters should be mutually exclusive for handling inputs. Found multiple adapters {} to handle input: {}, {}'.format(adapter_cls, _type_name(x), _type_name(y)))\n return adapter_cls[0]", + "docstring": "Selects a data adapter than can handle a given x and y.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:select_data_adapter arg:x arg:y arguments arg arg Assign Call If Raise Call Call Call Call If Compare Call Raise Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "redirect", + "source_code": "@contextmanager\ndef redirect(std: str, to_file: str):\n if std not in _VALID_STD:\n raise ValueError(f'unknown standard stream <{std}>, must be one of {_VALID_STD}')\n c_std = _c_std(std)\n python_std = _python_std(std)\n std_fd = python_std.fileno()\n\n def _redirect(dst):\n libc.fflush(c_std)\n python_std.flush()\n os.dup2(dst.fileno(), std_fd)\n with os.fdopen(os.dup(std_fd)) as orig_std, open(to_file, mode='w+b') as dst:\n _redirect(dst)\n try:\n yield\n finally:\n _redirect(orig_std)", + "docstring": "Redirect `` is assumed to exist and the destination file is overwritten if it already exists. .. note:: Due to buffering cross source writes are not guaranteed to appear in wall-clock order. For instance in the example below it is possible for the C-outputs to appear before the python outputs in the log file. 
Usage: :: # syntactic-sugar for redirect(\"stdout\", \"tmp/stdout.log\") with redirect_stdout(\"/tmp/stdout.log\"): print(\"python stdouts are redirected\") libc = ctypes.CDLL(\"libc.so.6\") libc.printf(b\"c stdouts are also redirected\" os.system(\"echo system stdouts are also redirected\") print(\"stdout restored\")", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\redirects.py", + "ast_data": "FunctionDef name:redirect arg:std arg:to_file arguments arg arg If Compare Raise Call Assign Call Assign Call Assign Call FunctionDef name:_redirect arg:dst arguments arg Call Call Call Call With Call Call Call Call Try Call" + }, + { + "library": "matplotlib", + "name": "minpos", + "source_code": "@property\ndef minpos(self):\n return self._minpos", + "docstring": "The minimum positive value in both directions within the Bbox. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors, and will be used as the minimum extent instead of *p0*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:minpos arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "clear_signature_defs", + "source_code": "def clear_signature_defs(tflite_model):\n model = tflite_model\n if not isinstance(tflite_model, bytearray):\n model = bytearray(tflite_model)\n return signature_def_util.ClearSignatureDefs(model)", + "docstring": "Clears SignatureDefs from the Metadata of a TfLite flatbuffer buffer. Args: tflite_model: TFLite model buffer to remove signature_defs. Returns: buffer: A TFLite model binary identical to model buffer with no SignatureDef metadata. Raises: ValueError: tflite_model buffer does not contain a valid TFLite model.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\signature\\signature_def_utils.py", + "ast_data": "FunctionDef name:clear_signature_defs arg:tflite_model arguments arg Assign If Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "predict_step", + "source_code": "def predict_step(self, data):\n data = data_adapter.expand_1d(data)\n x, _, _ = data_adapter.unpack_x_y_sample_weight(data)\n return self(x, training=False)", + "docstring": "The logic for one inference step. This method can be overridden to support custom inference logic. This method is called by . This method should contain the mathematical logic for one step of inference. This typically includes the forward pass. Configuration details for *how* this logic is run (e.g. and settings), should be left to , which can also be overridden. Args: data: A nested structure of s. 
Returns: The result of one inference step, typically the output of calling the on data.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:predict_step arg:self arg:data arguments arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_call_end_callbacks_on_future", + "source_code": "def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]:\n if not self.run_callbacks_on_exit:\n raise RuntimeError('_call_end_callbacks_on_future can only be called once.')\n self.run_callbacks_on_exit = False\n record = self.record\n assert record is not None\n if not torch.jit.is_scripting():\n with torch._C.DisableTorchFunctionSubclass():\n profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut._RecordFunction(record, fut)\n else:\n profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)\n return profiled_future", + "docstring": "Use for profiling async calls that return a future. Calling this function will extend recording beyond this scope, until the future is satisfied. It is useful for profiling the end to end time of asynchronous calls. This function should only be called once to attach the callback onto the future, and will throw if called multiple times. Args: fut: (torch._C.Future): future for which to schedule callback for. Returns: A future that completes with the value of the passed in future when the profiling callbacks have ran.", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\profiler.py", + "ast_data": "FunctionDef name:_call_end_callbacks_on_future arg:self arg:fut arguments arg arg If Raise Call Assign Assign Compare If Call With Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, bytes_per_pack=0, timeout_seconds=None, implementation=CommunicationImplementation.AUTO):\n pass", + "docstring": "Creates a CollectiveHints. Args: bytes_per_pack: a non-negative integer. Breaks collective operations into packs of certain size. If it's zero, the value is determined automatically. This hint is respected by all multi-replica strategies except . timeout_seconds: a float or None, timeout in seconds. If not None, the collective raises if it takes longer than this timeout. Zero disables timeout. This can be useful when debugging hanging issues. This should only be used for debugging since it creates a new thread for each collective, i.e. an overhead of more threads. This only works for . implementation: a . This is a hint on the preferred communication implementation. Possible values include , , and . NCCL is generally more performant for GPU, but doesn't work for CPU. This only works for . Raises: ValueError: When arguments have invalid value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:bytes_per_pack arg:timeout_seconds arg:implementation arguments arg arg arg arg" + }, + { + "library": "django", + "name": "__getitem__", + "source_code": "def __getitem__(self, k):\n if not isinstance(k, (int, slice)):\n raise TypeError('QuerySet indices must be integers or slices, not %s.' 
% type(k).__name__)\n if isinstance(k, int) and k < 0 or (isinstance(k, slice) and (k.start is not None and k.start < 0 or (k.stop is not None and k.stop < 0))):\n raise ValueError('Negative indexing is not supported.')\n if self._result_cache is not None:\n return self._result_cache[k]\n if isinstance(k, slice):\n qs = self._chain()\n if k.start is not None:\n start = int(k.start)\n else:\n start = None\n if k.stop is not None:\n stop = int(k.stop)\n else:\n stop = None\n qs.query.set_limits(start, stop)\n return list(qs)[::k.step] if k.step else qs\n qs = self._chain()\n qs.query.set_limits(k, k + 1)\n qs._fetch_all()\n return qs._result_cache[0]", + "docstring": "Retrieve an item or slice from the set of results.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:k arguments arg arg If Call Raise Call Call If BoolOp BoolOp Call Compare BoolOp Call BoolOp BoolOp Compare Compare BoolOp Compare Compare Raise Call If Compare Return return:yes If Call Assign Call If Compare Assign Call Assign If Compare Assign Call Assign Call Return return:yes Call Assign Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "_get_weight_url", + "source_code": "def _get_weight_url(variant: str) -> str:\n KORNIA_CHECK(variant in _AVAILABLE_WEIGHTS, f'Variant {variant} does not have pre-trained checkpoint')\n model_type, patch_size = variant.split('/')\n return f'https://huggingface.co/kornia/{model_type}{patch_size}_augreg_i21k_r224/resolve/main/{model_type}-{patch_size}.pth'", + "docstring": "Return the URL of the model weights.", + "type": "function", + "file_path": "kornia\\kornia\\contrib\\vit.py", + "ast_data": "FunctionDef name:_get_weight_url arg:variant arguments arg Call Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "stateless_dropout", + "source_code": "@dispatch.dispatch_for_api(nn_ops.stateless_dropout)\ndef stateless_dropout(x: ragged_tensor.Ragged, rate, seed, rng_alg=None, noise_shape=None, name=None):\n if noise_shape is not None:\n raise ValueError('noise_shape is not supported yet for RaggedTensor x')\n with ops.name_scope(name, 'RaggedNNStatelessDropout', [x, rate]):\n x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')\n return x.with_flat_values(nn_ops.stateless_dropout(x.flat_values, rate=rate, seed=seed, rng_alg=rng_alg))", + "docstring": "Ragged dispatch target for tf.nn.experimental.stateless_dropout.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py", + "ast_data": "FunctionDef name:stateless_dropout arg:x arg:rate arg:seed arg:rng_alg arg:noise_shape arg:name arguments arg arg arg arg arg arg If Compare Raise Call With Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "_format_ticks", + "source_code": "def _format_ticks(self, idx, direction, factor, levels):\n fmt = _api.check_getitem({1: self.tick_formatter1, 2: self.tick_formatter2}, idx=idx)\n return fmt.format_ticks(levels) if isinstance(fmt, mticker.Formatter) else fmt(direction, factor, levels)", + "docstring": "Helper to support both standard formatters (inheriting from ) and axisartist-specific ones; should be called instead of directly calling ``. 
This method should be considered as a temporary workaround which will be removed in the future at the same time as axisartist-specific formatters.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py", + "ast_data": "FunctionDef name:_format_ticks arg:self arg:idx arg:direction arg:factor arg:levels arguments arg arg arg arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "ExpandingIndexer", + "source_code": "class ExpandingIndexer(BaseIndexer):\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]:\n return (np.zeros(num_values, dtype=np.int64), np.arange(1, num_values + 1, dtype=np.int64))", + "docstring": "Calculate expanding window bounds, mimicking df.expanding()", + "type": "class", + "file_path": "pandas\\pandas\\core\\indexers\\objects.py", + "ast_data": "ClassDef name:ExpandingIndexer FunctionDef name:get_window_bounds arg:self arg:num_values arg:min_periods arg:center arg:closed arg:step arguments arg arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "authlib", + "name": "validate_iat", + "source_code": "def validate_iat(self, now, leeway):\n if 'iat' in self:\n iat = self['iat']\n if not _validate_numeric_time(iat):\n raise InvalidClaimError('iat')\n if iat > now + leeway:\n raise InvalidTokenError(description='The token is not valid as it was issued in the future')", + "docstring": "The \"iat\" (issued at) claim identifies the time at which the JWT was issued. This claim can be used to determine the age of the JWT. Implementers MAY provide for some small leeway, usually no more than a few minutes, to account for clock skew. Its value MUST be a number containing a NumericDate value. Use of this claim is OPTIONAL.", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py", + "ast_data": "FunctionDef name:validate_iat arg:self arg:now arg:leeway arguments arg arg arg If Compare Assign If Call Raise Call If Compare Raise Call" + }, + { + "library": "matplotlib", + "name": "infodict", + "source_code": "def infodict(self):\n return self._ensure_file().infoDict", + "docstring": "Return a modifiable information dictionary object (see PDF reference section 10.2.1 'Document Information Dictionary').", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:infodict arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "ThetaFormatter", + "source_code": "class ThetaFormatter(Formatter):\n\n def __init__(self, round_to=1.0):\n self._round_to = round_to\n\n def __call__(self, x, pos=None):\n degrees = round(np.rad2deg(x) / self._round_to) * self._round_to\n return f'{degrees:0.0f}°'", + "docstring": "Used to format the theta tick labels. 
Converts the native unit of radians into degrees and adds a degree symbol.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py", + "ast_data": "ClassDef name:ThetaFormatter FunctionDef name:__init__ arg:self arg:round_to arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "obrientransform", + "source_code": "def obrientransform(*samples):\n TINY = np.sqrt(np.finfo(float).eps)\n arrays = []\n sLast = None\n for sample in samples:\n a = np.asarray(sample)\n n = len(a)\n mu = np.mean(a)\n sq = (a - mu) ** 2\n sumsq = sq.sum()\n t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))\n var = sumsq / (n - 1)\n if abs(var - np.mean(t)) > TINY:\n raise ValueError('Lack of convergence in obrientransform.')\n arrays.append(t)\n sLast = a.shape\n if sLast:\n for arr in arrays[:-1]:\n if sLast != arr.shape:\n return np.array(arrays, dtype=object)\n return np.array(arrays)", + "docstring": "Compute the O'Brien transform on input data (any number of arrays). Used to test for homogeneity of variance prior to running one-way stats. Each array in `f_onewayscipy.stats.f_oneway` for significance, we cannot conclude that the variances are different.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:obrientransform arguments arg Assign Call Call Assign Assign For Assign Call Assign Call Assign Call Assign Assign Call Assign Assign If Compare Call Call Raise Call Call Assign If For If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_from_args_and_kwargs", + "source_code": "@staticmethod\ndef get_from_args_and_kwargs(*args, **kwargs):\n if isinstance(args[0], Triangulation):\n triangulation, *args = args\n if 'triangles' in kwargs:\n _api.warn_external(\"Passing the keyword 'triangles' has no effect when also passing a Triangulation\")\n if 'mask' in kwargs:\n _api.warn_external(\"Passing the keyword 'mask' has no effect when also passing a Triangulation\")\n else:\n x, y, triangles, mask, args, kwargs = Triangulation._extract_triangulation_params(args, kwargs)\n triangulation = Triangulation(x, y, triangles, mask)\n return (triangulation, args, kwargs)", + "docstring": "Return a Triangulation object from the args and kwargs, and the remaining args and kwargs with the consumed values removed. There are two alternatives: either the first argument is a Triangulation object, in which case it is returned, or the args and kwargs are sufficient to create a new Triangulation to return. 
In the latter case, see Triangulation.__init__ for the possible args and kwargs.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py", + "ast_data": "FunctionDef name:get_from_args_and_kwargs arguments arg arg If Call Assign If Compare Call If Compare Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "record_choice", + "source_code": "@staticmethod\ndef record_choice(multi_kernel_name: str, picked_kernel_name: str):\n from torch._inductor.graph import GraphLowering\n if not isinstance(V.graph, GraphLowering):\n return\n if not V.graph.record_multi_kernel_choice:\n return\n V.graph.multi_kernel_to_choice[multi_kernel_name] = picked_kernel_name", + "docstring": "Record the multi-kernel choice for cpp-wrapper after autotuning We should do nothing if this function is not called during codegen.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py", + "ast_data": "FunctionDef name:record_choice arg:multi_kernel_name arg:picked_kernel_name arguments arg arg If Call Return return:no If Return return:no Assign" + }, + { + "library": "tensorflow", + "name": "reverse", + "source_code": "@dispatch.dispatch_for_api(array_ops.reverse)\ndef reverse(tensor: ragged_tensor.Ragged, axis, name=None):\n type_error_msg = '`axis` must be a list of int or a constant tensorwhen reversing axes in a ragged tensor'\n with ops.name_scope(name, 'Reverse', [tensor, axis]):\n if isinstance(axis, tensor_lib.Tensor):\n axis = tensor_util.constant_value(axis)\n if axis is None:\n raise TypeError(type_error_msg)\n elif not (isinstance(axis, (list, tuple)) and all((isinstance(dim, int) for dim in axis))):\n raise TypeError(type_error_msg)\n tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(tensor, name='tensor')\n axis = [array_ops.get_positive_axis(dim, tensor.shape.rank, 'axis[%d]' % i, 'rank(tensor)') for i, dim in enumerate(axis)]\n slices = [slice(None)] * (max(axis) + 1 if axis else 0)\n for dim in axis:\n slices[dim] = slice(None, None, -1)\n return tensor[tuple(slices)]", + "docstring": "Reverses a RaggedTensor along the specified axes. #### Example: >>> data = tf.ragged.constant([ ... [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10], [11, 12]]]) >>> tf.reverse(data, axis=[0, 2]) Args: tensor: A 'RaggedTensor' to reverse. axis: A list or tuple of 'int' or a constant 1D 'tf.Tensor'. The indices of the axes to reverse. name: A name prefix for the returned tensor (optional). Returns: A 'RaggedTensor'.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", + "ast_data": "FunctionDef name:reverse arg:tensor arg:axis arg:name arguments arg arg arg Assign With Call If Call Assign Call If Compare Raise Call If BoolOp Call Call Call Raise Call Assign Call Assign Call Call Assign Call Call For Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, tensors, every_n_iter=None, every_n_secs=None, at_end=False, formatter=None):\n only_log_at_end = at_end and every_n_iter is None and (every_n_secs is None)\n if not only_log_at_end and (every_n_iter is None) == (every_n_secs is None):\n raise ValueError('either at_end and/or exactly one of every_n_iter and every_n_secs must be provided.')\n if every_n_iter is not None and every_n_iter <= 0:\n raise ValueError('invalid every_n_iter=%s.' 
% every_n_iter)\n if not isinstance(tensors, dict):\n self._tag_order = tensors\n tensors = {item: item for item in tensors}\n else:\n self._tag_order = sorted(tensors.keys())\n self._tensors = tensors\n self._formatter = formatter\n self._timer = NeverTriggerTimer() if only_log_at_end else SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter)\n self._log_at_end = at_end", + "docstring": "Initializes a . Args: tensors: that maps string-valued tags to tensors/tensor names, or of tensors/tensor names. every_n_iter: , print the values of once every N local steps taken on the current worker. every_n_secs: or , print the values of once every N seconds. Exactly one of and should be provided. at_end: specifying whether to print the values of at the end of the run. formatter: function, takes dict of -> and returns a string. If uses default printing all tensors. Raises: ValueError: if is non-positive.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:tensors arg:every_n_iter arg:every_n_secs arg:at_end arg:formatter arguments arg arg arg arg arg arg Assign BoolOp Compare Compare If BoolOp Compare Compare Compare Raise Call If BoolOp Compare Compare Raise Call If Call Assign Assign Assign Call Call Assign Assign Assign Call Call Assign" + }, + { + "library": "django", + "name": "__iter__", + "source_code": "def __iter__(self):\n for i in range(self.size):\n yield self[i]", + "docstring": "Iterate over each point in the coordinate sequence.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py", + "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call" + }, + { + "library": "pytorch", + "name": "benchmark_codegened_module", + "source_code": "def benchmark_codegened_module(self, module: ModuleType) -> tuple[float, str]:\n raise NotImplementedError", + "docstring": "Benchmark a compiled module and return the execution time in milliseconds on randomly generated inputs.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:benchmark_codegened_module arg:self arg:module arguments arg arg Raise" + }, + { + "library": "scipy", + "name": "BiggsExp03", + "source_code": "class BiggsExp03(Benchmark):\n\n def __init__(self, dimensions=3):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0] * 3, [20] * 3))\n self.global_optimum = [[1.0, 10.0, 5.0]]\n self.fglob = 0\n\n def fun(self, x, *args):\n self.nfev += 1\n t = arange(1.0, 11.0) * 0.1\n y = exp(-t) - 5 * exp(-10 * t)\n vec = (exp(-t * x[0]) - x[2] * exp(-t * x[1]) - y) ** 2\n return sum(vec)", + "docstring": "BiggsExp03 objective function. The BiggsExp03 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. math:: \\begin{matrix}\\ f_{\\text{BiggsExp03}}(x) = \\sum_{i=1}^{10} (e^{-t_i x_1} - x_3e^{-t_i x_2} - y_i)^2\\\\ t_i = 0.1i\\\\ y_i = e^{-t_i} - 5e^{-10 t_i}\\\\ \\end{matrix} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py", + "ast_data": "ClassDef name:BiggsExp03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "mod", + "source_code": "def mod(a: Tensor, b: int) -> Tensor:\n return a - a // b * b", + "docstring": "Compute the modulo operation for two numbers. This function calculates the remainder of the division of 'a' by 'b' using the formula: a - (a // b) * b, which is equivalent to the modulo operation. Args: a: The dividend. b: The divisor. Returns: The remainder of a divided by b. Example: >>> mod(7, 3) 1", + "type": "function", + "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\post_processor.py", + "ast_data": "FunctionDef name:mod arg:a arg:b arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "variables", + "source_code": "@property\ndef variables(self):\n return tuple(self._flatten(predicate=_is_variable, expand_composites=True))", + "docstring": "Sequence of variables owned by this module and its submodules. Note: this method uses reflection to find variables on the current instance and submodules. For performance reasons you may wish to cache the result of calling this method if you don't expect the return value to change. Returns: A sequence of variables for the current module (sorted by attribute name) followed by variables from all submodules recursively (breadth first).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\module\\module.py", + "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "readline", + "source_code": "def readline(self, size=None):\n return self.fp.readline(size)", + "docstring": "Read a line of bytes from the connection.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpreqbody.py", + "ast_data": "FunctionDef name:readline arg:self arg:size arguments arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "LineIterator", + "source_code": "class LineIterator:\n\n def __init__(self, iterable):\n object.__init__(self)\n self.iterable = iter(iterable)\n self.lineno = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.lineno += 1\n line = next(self.iterable)\n line = line.rstrip()\n return line\n next = __next__", + "docstring": "LineIterator(iterable) Return rstrip()'d lines from iterable, while keeping a count of the line number in the .lineno attribute.", + "type": "class", + "file_path": "numpy\\numpy\\linalg\\lapack_lite\\fortran.py", + "ast_data": "ClassDef name:LineIterator FunctionDef name:__init__ arg:self arg:iterable arguments arg arg Call Assign Call Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg Assign Call Assign Call Return return:yes Assign" + }, + { + "library": "tensorflow", + "name": "GlobalMaxPooling3D", + "source_code": "class GlobalMaxPooling3D(GlobalPooling3D):\n\n def call(self, inputs):\n if self.data_format == 'channels_last':\n return backend.max(inputs, axis=[1, 2, 3], keepdims=self.keepdims)\n else:\n return backend.max(inputs, axis=[2, 3, 4], keepdims=self.keepdims)", + 
"docstring": "Global Max pooling operation for 3D data. Args: data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . It defaults to the value found in your Keras config file at . If you never set it, then it will be \"channels_last\". keepdims: A boolean, whether to keep the spatial dimensions or not. If is (default), the rank of the tensor is reduced for spatial dimensions. If is , the spatial dimensions are retained with length 1. The behavior is the same as for or . Input shape: - If : 5D tensor with shape: - If : 5D tensor with shape: Output shape: - If =False: 2D tensor with shape . - If =True: - If : 5D tensor with shape - If : 5D tensor with shape", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\pooling.py", + "ast_data": "ClassDef name:GlobalMaxPooling3D FunctionDef name:call arg:self arg:inputs arguments arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "replica_local_fn", + "source_code": "def replica_local_fn(*args, **kwargs):\n if any((isinstance(arg, keras_tensor.KerasTensor) for arg in nest.flatten((args, kwargs)))):\n update_op = None\n else:\n update_op = self.update_state(*args, **kwargs)\n update_ops = []\n if update_op is not None:\n update_ops.append(update_op)\n with ops.control_dependencies(update_ops):\n result_t = self.result()\n result_t._metric_obj = self\n return result_t", + "docstring": "Updates the state of the metric in a replica-local context.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:replica_local_fn arguments arg arg If Call Call Call Assign Assign Call Assign If Compare Call With Call Assign Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "_gamma1p", + "source_code": "def _gamma1p(vals):\n res = gamma(vals + 1)\n if isinstance(res, np.ndarray):\n if not _is_subdtype(vals.dtype, 'c'):\n res[vals == -1] = np.nan\n elif np.isinf(res) and vals == -1:\n res = np.float64('nan')\n return res", + "docstring": "returns gamma(n+1), though with NaN at -1 instead of inf, c.f. #21827", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:_gamma1p arg:vals arguments arg Assign Call If Call If Call Assign Compare If BoolOp Call Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_weights", + "source_code": "def get_weights(self):\n strategy = self._distribution_strategy or self._compile_time_distribution_strategy\n if strategy:\n with strategy.scope():\n return base_layer.Layer.get_weights(self)\n return base_layer.Layer.get_weights(self)", + "docstring": "Retrieves the weights of the model. 
Returns: A flat list of Numpy arrays.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:get_weights arg:self arguments arg Assign BoolOp If With Call Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_previous_month", + "source_code": "def get_previous_month(self, date):\n return _get_next_prev(self, date, is_previous=True, period='month')", + "docstring": "Get the previous valid month.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:get_previous_month arg:self arg:date arguments arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "symmetric_transfer_error", + "source_code": "def symmetric_transfer_error(pts1: Tensor, pts2: Tensor, H: Tensor, squared: bool=True, eps: float=1e-08) -> Tensor:\n KORNIA_CHECK_SHAPE(H, ['B', '3', '3'])\n if pts1.size(-1) == 3:\n pts1 = convert_points_from_homogeneous(pts1)\n if pts2.size(-1) == 3:\n pts2 = convert_points_from_homogeneous(pts2)\n max_num = torch.finfo(pts1.dtype).max\n H_inv, good_H = safe_inverse_with_mask(H)\n there: Tensor = oneway_transfer_error(pts1, pts2, H, True, eps)\n back: Tensor = oneway_transfer_error(pts2, pts1, H_inv, True, eps)\n good_H_reshape: Tensor = good_H.view(-1, 1).expand_as(there)\n out = (there + back) * good_H_reshape.to(there.dtype) + max_num * (~good_H_reshape).to(there.dtype)\n if squared:\n return out\n return (out + eps).sqrt()", + "docstring": "Return Symmetric transfer error for correspondences given the homography matrix. Args: pts1: correspondences from the left images with shape (B, N, 2 or 3). If they are homogeneous, converted automatically. pts2: correspondences from the right images with shape (B, N, 2 or 3). If they are homogeneous, converted automatically. H: Homographies with shape :math:. squared: if True (default), the squared distance is returned. eps: Small constant for safe sqrt. Returns: the computed distance with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\homography.py", + "ast_data": "FunctionDef name:symmetric_transfer_error arg:pts1 arg:pts2 arg:H arg:squared arg:eps arguments arg arg arg arg arg Call If Compare Call Assign Call If Compare Call Assign Call Assign Call Assign Call Call Call Call Call Assign Call Call If Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "wait", + "source_code": "def wait(self) -> None:\n torch._C._mps_waitForEvent(self.__eventId)", + "docstring": "Makes all future work submitted to the default stream wait for this event.", + "type": "method", + "file_path": "pytorch\\torch\\mps\\event.py", + "ast_data": "FunctionDef name:wait arg:self arguments arg Call" + }, + { + "library": "scipy", + "name": "diagsvd", + "source_code": "@_apply_over_batch(('s', 1))\ndef diagsvd(s, M, N):\n part = diag(s)\n typ = part.dtype.char\n MorN = len(s)\n if MorN == M:\n return np.hstack((part, zeros((M, N - M), dtype=typ)))\n elif MorN == N:\n return r_[part, zeros((M - N, N), dtype=typ)]\n else:\n raise ValueError('Length of s must be M or N.')", + "docstring": "Construct the sigma matrix in SVD from singular values and size M, N. Parameters ---------- s : (M,) or (N,) array_like Singular values M : int Size of the matrix whose singular values are . N : int Size of the matrix whose singular values are . 
Returns ------- S : (M, N) ndarray The S-matrix in the singular value decomposition See Also -------- svd : Singular value decomposition of a matrix svdvals : Compute singular values of a matrix. Examples -------- >>> import numpy as np >>> from scipy.linalg import diagsvd >>> vals = np.array([1, 2, 3]) # The array representing the computed svd >>> diagsvd(vals, 3, 4) array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0]]) >>> diagsvd(vals, 4, 3) array([[1, 0, 0], [0, 2, 0], [0, 0, 3], [0, 0, 0]])", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_decomp_svd.py", + "ast_data": "FunctionDef name:diagsvd arg:s arg:M arg:N arguments arg arg arg Assign Call Assign Assign Call If Compare Return return:yes Call Call If Compare Return return:yes Call Raise Call Call" + }, + { + "library": "numpy", + "name": "_hist_bin_sturges", + "source_code": "def _hist_bin_sturges(x, range):\n del range\n return _ptp(x) / (np.log2(x.size) + 1.0)", + "docstring": "Sturges histogram bin estimator. A very simplistic estimator based on the assumption of normality of the data. This estimator has poor performance for non-normal data, which becomes especially obvious for large data sets. The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_histograms_impl.py", + "ast_data": "FunctionDef name:_hist_bin_sturges arg:x arg:range arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "handle_sym_dispatch", + "source_code": "def handle_sym_dispatch(func: Callable[_P, R], args: _P.args, kwargs: _P.kwargs) -> R:\n mode = get_proxy_mode()\n assert mode\n with disable_proxy_modes_tracing():\n types: list[type] = []\n return mode.__sym_dispatch__(func, types, args, kwargs)", + "docstring": "Call into the currently active proxy tracing mode to do a SymInt/SymFloat/SymBool dispatch trace on a function that operates on these arguments.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py", + "ast_data": "FunctionDef name:handle_sym_dispatch arg:func arg:args arg:kwargs arguments arg arg arg Assign Call With Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_standardize_shapes", + "source_code": "def _standardize_shapes(path, tensor, shape):\n if not isinstance(tensor, torch.Tensor):\n return None\n if shape is None:\n return [Dim.STATIC] * len(tensor.shape)\n out = []\n if isinstance(shape, dict):\n for i, s in enumerate(tensor.shape):\n out.append(s if shape.get(i) is None else shape.get(i))\n else:\n assert isinstance(shape, (tuple, list))\n for i, s in enumerate(tensor.shape):\n out.append(s if shape[i] is None else shape[i])\n return out", + "docstring": "Helps standardize the dynamic_shapes tree structure we serialize, returning lists for each tensor shape, handling tensor-level Nones.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\serde\\dynamic_shapes.py", + "ast_data": "FunctionDef name:_standardize_shapes arg:path arg:tensor arg:shape arguments arg arg arg If Call Return return:no If Compare Return return:yes Call Assign If Call For Call Call Compare Call Call Call For Call Call Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "_reset_build_compile_trackers", + "source_code": "def _reset_build_compile_trackers(model):\n model.built = False\n 
model.inputs = None\n model.outputs = None\n model._is_compiled = False\n if not ops.executing_eagerly_outside_functions():\n model._v1_compile_was_called = False\n model.optimizer = None", + "docstring": "Reset state trackers for model. Note that we do not actually zero out attributes such as optimizer, but instead rely on the expectation that all of the attrs will be over-written on calling build/compile/etc. This is somewhat fragile, insofar as we check elsewhere for the presence of these attributes as evidence of having been built/compiled/etc. Pending a better way to do this, we reset key attributes here to allow building and compiling. Args: model: the model that is being reset", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\models.py", + "ast_data": "FunctionDef name:_reset_build_compile_trackers arg:model arguments arg Assign Assign Assign Assign If Call Assign Assign" + }, + { + "library": "pytorch", + "name": "get_bwd_recv_ops", + "source_code": "def get_bwd_recv_ops(self, bwd_chunk_id: int) -> list[dist.P2POp]:\n if not self.has_backward or self.is_last:\n return []\n recv_infos = self.grad_recv_info[bwd_chunk_id]\n return self._get_recv_ops(recv_infos)", + "docstring": "Returns a list of ops that are needed to receive the gradients for this stage.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", + "ast_data": "FunctionDef name:get_bwd_recv_ops arg:self arg:bwd_chunk_id arguments arg arg If BoolOp Return return:no Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_add_text", + "source_code": "def _add_text(self, txt):\n _api.check_isinstance(mtext.Text, txt=txt)\n self._set_artist_props(txt)\n self._children.append(txt)\n txt._remove_method = self._children.remove\n self.stale = True\n return txt", + "docstring": "Add a to the Axes; return the text.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:_add_text arg:self arg:txt arguments arg arg Call Call Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_mirrored_tpu_replicated_variables", + "source_code": "def _create_mirrored_tpu_replicated_variables(**kwargs):\n initial_value = kwargs['initial_value']\n with maybe_init_scope():\n initial_value = initial_value() if callable(initial_value) else initial_value\n mirrored_replicated_var_list = []\n for replica_id in range(num_replicas):\n replicated_var_list = []\n for logic_core_id in range(num_cores_per_replica):\n with ops.device(self._tpu_devices[replica_id][logic_core_id]):\n kwargs['initial_value'] = initial_value\n v = next_creator(**kwargs)\n replicated_var_list.append(v)\n replica_name = '{}/r:{}'.format(kwargs['name'], replica_id)\n tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(variables=replicated_var_list, name=replica_name)\n mirrored_replicated_var_list.append(tpu_replicated_var)\n return mirrored_replicated_var_list", + "docstring": "Returns a list of s. The list consists of s and can be used to initialize a . Each contains a list of s which are replicated to logical cores to enable XLA SPMD compilation. 
Args: **kwargs: the keyword arguments for creating a variable", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:_create_mirrored_tpu_replicated_variables arguments arg Assign With Call Assign Call Call Assign For Call Assign For Call With Call Assign Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "UUIDTextMixin", + "source_code": "class UUIDTextMixin:\n\n def process_rhs(self, qn, connection):\n if not connection.features.has_native_uuid_field:\n from django.db.models.functions import Replace\n if self.rhs_is_direct_value():\n self.rhs = Value(self.rhs)\n self.rhs = Replace(self.rhs, Value('-'), Value(''), output_field=CharField())\n rhs, params = super().process_rhs(qn, connection)\n return (rhs, params)", + "docstring": "Strip hyphens from a value when filtering a UUIDField on backends without a native datatype for UUID.", + "type": "class", + "file_path": "django\\django\\db\\models\\lookups.py", + "ast_data": "ClassDef name:UUIDTextMixin FunctionDef name:process_rhs arg:self arg:qn arg:connection arguments arg arg arg If If Call Assign Call Assign Call Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_per_worker_resources", + "source_code": "def _create_per_worker_resources(self, fn, args=None, kwargs=None):\n results = []\n for w in self._cluster.workers:\n results.append(w.create_resource(fn, args=args, kwargs=kwargs))\n return PerWorkerValues(tuple(results))", + "docstring": "Synchronously create resources on the workers. The resources are represented by s. Args: fn: The function to be dispatched to all workers for execution asynchronously. args: Positional arguments for . kwargs: Keyword arguments for . Returns: A object, which wraps a tuple of objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:_create_per_worker_resources arg:self arg:fn arg:args arg:kwargs arguments arg arg arg arg Assign For Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "ResidualWrapper", + "source_code": "@deprecated(None, 'Please use tf.keras.layers.RNN instead.')\n@tf_export('nn.RNNCellResidualWrapper', v1=[])\nclass ResidualWrapper(rnn_cell_wrapper_impl.ResidualWrapperBase, _RNNCellWrapperV2):\n\n def __init__(self, *args, **kwargs):\n super(ResidualWrapper, self).__init__(*args, **kwargs)\n __init__.__doc__ = rnn_cell_wrapper_impl.ResidualWrapperBase.__init__.__doc__", + "docstring": "RNNCell wrapper that ensures cell inputs are added to the outputs.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\rnn_cell_wrapper_v2.py", + "ast_data": "ClassDef name:ResidualWrapper FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call Call" + }, + { + "library": "authlib", + "name": "validate_registration_endpoint", + "source_code": "def validate_registration_endpoint(self):\n url = self.get('registration_endpoint')\n if url and (not is_secure_transport(url)):\n raise ValueError('\"registration_endpoint\" MUST use \"https\" scheme')", + "docstring": "OPTIONAL. 
URL of the authorization server's OAuth 2.0 Dynamic Client Registration endpoint [RFC7591].", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", + "ast_data": "FunctionDef name:validate_registration_endpoint arg:self arguments arg Assign Call If BoolOp Call Raise Call" + }, + { + "library": "matplotlib", + "name": "make_dvi", + "source_code": "@classmethod\ndef make_dvi(cls, tex, fontsize):\n basefile = cls.get_basefile(tex, fontsize)\n dvifile = '%s.dvi' % basefile\n if not os.path.exists(dvifile):\n texfile = Path(cls.make_tex(tex, fontsize))\n cwd = Path(dvifile).parent\n with TemporaryDirectory(dir=cwd) as tmpdir:\n tmppath = Path(tmpdir)\n cls._run_checked_subprocess(['latex', '-interaction=nonstopmode', '--halt-on-error', f'--output-directory={tmppath.name}', f'{texfile.name}'], tex, cwd=cwd)\n (tmppath / Path(dvifile).name).replace(dvifile)\n return dvifile", + "docstring": "Generate a dvi file containing latex's layout of tex string. Return the file name.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py", + "ast_data": "FunctionDef name:make_dvi arg:cls arg:tex arg:fontsize arguments arg arg arg Assign Call Assign If Call Assign Call Call Assign Call With Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "size_internal", + "source_code": "def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):\n if context.executing_eagerly() and (not hasattr(input, 'graph')) and (not isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):\n input = ops.convert_to_tensor(input)\n np_out_type = out_type.as_numpy_dtype\n num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)\n return ops.convert_to_tensor(num_elements, dtype=out_type)\n with ops.name_scope(name, 'Size', [input]) as name:\n if isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n return gen_math_ops.prod(gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)\n else:\n input = ops.convert_to_tensor(input)\n input_shape = input.get_shape()\n if optimize:\n if input_shape.is_fully_defined():\n return constant(input_shape.num_elements(), out_type, name=name)\n if input_shape.dims and any((dim == 0 for dim in input_shape.dims)):\n return constant(0, out_type, name=name)\n return gen_array_ops.size(input, name=name, out_type=out_type)", + "docstring": "Returns the size of a tensor. Args: input: A or . name: A name for the operation (optional). optimize: if true, encode the size as a constant when possible. out_type: (Optional) The specified non-quantized numeric output type of the operation. Defaults to . Returns: A of type . Defaults to .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:size_internal arg:input arg:name arg:optimize arg:out_type arguments arg arg arg arg If BoolOp Call Call Call Assign Call Assign Assign Call Call Return return:yes Call With Call If Call Return return:yes Call Call Assign Call Assign Call If If Call Return return:yes Call Call If BoolOp Call Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "local_device_locations", + "source_code": "def local_device_locations(self) -> List[Dict[str, int]]:\n mapping = self.unravel_index()\n return [mapping[device_id] for device_id in self.local_device_ids()]", + "docstring": "Returns a list of local device locations. 
A device location is a dictionary from dimension names to indices on those dimensions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:local_device_locations arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "add_input", + "source_code": "def add_input(self, *args, **kwargs):\n return self._inputs.add(*args, **kwargs)", + "docstring": "Add a wrapped input argument to the hint. Args: *args: The input tensor. **kwargs: \"name\" label \"tag\" a tag to group multiple arguments that will be aggregated. I.e. a string like 'cool_input'. Basically multiple inputs can be added to the same hint for parallel operations that will eventually be combined. An example would be static_rnn which creates multiple copies of state or inputs. \"aggregate\" aggregation strategy that is valid only for tag non None. Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST, and OpHint.AGGREGATE_STACK. \"index_override\" The global index to use. This corresponds to the argument order in the final stub that will be generated. Returns: The wrapped input tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py", + "ast_data": "FunctionDef name:add_input arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get", + "source_code": "def _get(self):\n with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):\n return super(SyncOnReadVariable, self)._get()", + "docstring": "Returns the value of SyncOnReadVariable based on surrounding context. If called under a non-default replica-context, returns the corresponding variable on that replica. If called under default replica-context or cross-replica context, returns the synced value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:_get arg:self arguments arg With Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "lp2bs_zpk", + "source_code": "def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0):\n xp = array_namespace(z, p)\n z, p = map(xp.asarray, (z, p))\n z, p = xp_promote(z, p, force_floating=True, xp=xp)\n z = xpx.atleast_nd(z, ndim=1, xp=xp)\n p = xpx.atleast_nd(p, ndim=1, xp=xp)\n wo = float(wo)\n bw = float(bw)\n degree = _relative_degree(z, p)\n z_hp = bw / 2 / z\n p_hp = bw / 2 / p\n z_hp = xp.astype(z_hp, xp.complex128)\n p_hp = xp.astype(p_hp, xp.complex128)\n z_bs = xp.concat((z_hp + xp.sqrt(z_hp ** 2 - wo ** 2), z_hp - xp.sqrt(z_hp ** 2 - wo ** 2)))\n p_bs = xp.concat((p_hp + xp.sqrt(p_hp ** 2 - wo ** 2), p_hp - xp.sqrt(p_hp ** 2 - wo ** 2)))\n z_bs = xp.concat((z_bs, xp.full(degree, +1j * wo)))\n z_bs = xp.concat((z_bs, xp.full(degree, -1j * wo)))\n k_bs = k * xp.real(xp.prod(-z) / xp.prod(-p))\n return (z_bs, p_bs, k_bs)", + "docstring": "Transform a lowpass filter prototype to a bandstop filter. Return an analog band-stop filter with center frequency and stopband width from an analog low-pass filter prototype with unity cutoff frequency, using zeros, poles, and gain ('zpk') representation. Parameters ---------- z : array_like Zeros of the analog filter transfer function. p : array_like Poles of the analog filter transfer function. k : float System gain of the analog filter transfer function. wo : float Desired stopband center, as angular frequency (e.g., rad/s). Defaults to no change. 
bw : float Desired stopband width, as angular frequency (e.g., rad/s). Defaults to 1. Returns ------- z : ndarray Zeros of the transformed band-stop filter transfer function. p : ndarray Poles of the transformed band-stop filter transfer function. k : float System gain of the transformed band-stop filter. See Also -------- lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear lp2bs Notes ----- This is derived from the s-plane substitution .. math:: s \\rightarrow \\frac{s \\cdot \\mathrm{BW}}{s^2 + {\\omega_0}^2} This is the \"wideband\" transformation, producing a stopband with geometric (log frequency) symmetry about . .. versionadded:: 1.1.0 Examples -------- Transform a low-pass filter represented in 'zpk' (Zero-Pole-Gain) form into a bandstop filter represented in 'zpk' form, with a center frequency wo and bandwidth bw. >>> from scipy.signal import lp2bs_zpk >>> z = [ ] >>> p = [ 0.7 , -1 ] >>> k = 9 >>> wo = 0.5 >>> bw = 10 >>> lp2bs_zpk(z, p, k, wo, bw) ( array([0.+0.5j, 0.+0.5j, 0.-0.5j, 0.-0.5j]), array([14.2681928 +0.j, -0.02506281+0.j, 0.01752149+0.j, -9.97493719+0.j]), -12.857142857142858)", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_filter_design.py", + "ast_data": "FunctionDef name:lp2bs_zpk arg:z arg:p arg:k arg:wo arg:bw arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call Call Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "restore", + "source_code": "def restore(self, restored_tensors, restored_shapes):\n tensor, = restored_tensors\n return values_util.get_on_read_restore_ops(self._sync_on_read_variable, tensor, self._sync_on_read_variable.aggregation)", + "docstring": "Restore the same value into all variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign Return return:yes Call" + }, + { + "library": "django", + "name": "relabel_aliases", + "source_code": "def relabel_aliases(self, change_map):\n if not change_map:\n return self\n for pos, child in enumerate(self.children):\n if hasattr(child, 'relabel_aliases'):\n child.relabel_aliases(change_map)\n elif hasattr(child, 'relabeled_clone'):\n self.children[pos] = child.relabeled_clone(change_map)", + "docstring": "Relabel the alias values of any children. 
'change_map' is a dictionary mapping old (current) alias values to the new values.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\where.py", + "ast_data": "FunctionDef name:relabel_aliases arg:self arg:change_map arguments arg arg If Return return:yes For Call If Call Call If Call Assign Call" + }, + { + "library": "tensorflow", + "name": "distributed_mode", + "source_code": "@property\ndef distributed_mode(self):\n return bool(self._cluster_spec) and self._task_type != _TaskType.EVALUATOR", + "docstring": "Whether it is distributed training or not.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py", + "ast_data": "FunctionDef name:distributed_mode arg:self arguments arg Return return:yes BoolOp Call Compare" + }, + { + "library": "pytorch", + "name": "_get_dtype_and_is_dynamic", + "source_code": "def _get_dtype_and_is_dynamic(obs_or_fq: Optional[ObserverOrFakeQuantize]) -> tuple[Optional[torch.dtype], bool]:\n if obs_or_fq is None:\n return (None, False)\n else:\n return (obs_or_fq.dtype, getattr(obs_or_fq, 'is_dynamic', False))", + "docstring": "Given a constructor for observer or fake quant module, returns a Tuple of dtype and is_dynamic", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py", + "ast_data": "FunctionDef name:_get_dtype_and_is_dynamic arg:obs_or_fq arguments arg If Compare Return return:yes Return return:yes Call" + }, + { + "library": "scrapy", + "name": "as_async_generator", + "source_code": "async def as_async_generator(it: Iterable[_T] | AsyncIterator[_T]) -> AsyncGenerator[_T]:\n if isinstance(it, AsyncIterator):\n async for r in it:\n yield r\n else:\n for r in it:\n yield r", + "docstring": "Wraps an iterable (sync or async) into an async generator.", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\asyncgen.py", + "ast_data": "AsyncFunctionDef name:as_async_generator arg:it arguments arg If Call For" + }, + { + "library": "pandas", + "name": "extractall", + "source_code": "@forbid_nonstring_types(['bytes'])\ndef extractall(self, pat, flags: int=0) -> DataFrame:\n return str_extractall(self._orig, pat, flags)", + "docstring": "Extract capture groups in the regex as columns in DataFrame. For each subject string in the Series, extract groups from all matches of regular expression pat. When each subject string in the Series has exactly one match, extractall(pat).xs(0, level='match') is the same as extract(pat). Parameters ---------- pat : str Regular expression pattern with capturing groups. flags : int, default 0 (no flags) A ``. Any capture group names in regular expression pat will be used for column names; otherwise capture group numbers will be used. See Also -------- extract : Returns first match only (not all matches). Examples -------- A pattern with one group will return a DataFrame with one column. Indices with no matches will not appear in the result. >>> s = pd.Series([\"a1a2\", \"b1\", \"c1\"], index=[\"A\", \"B\", \"C\"]) >>> s.str.extractall(r\"\") 0 match A 0 1 1 2 B 0 1 Capture group names are used for column names of the result. >>> s.str.extractall(r\"\") digit match A 0 1 1 2 B 0 1 A pattern with two groups will return a DataFrame with two columns. >>> s.str.extractall(r\"(?P[ab])(?P\\d)\") letter digit match A 0 a 1 1 a 2 B 0 b 1 Optional groups that do not match are NaN in the result. 
>>> s.str.extractall(r\"(?P[ab])?(?P\\d)\") letter digit match A 0 a 1 1 a 2 B 0 b 1 C 0 NaN 1", + "type": "method", + "file_path": "pandas\\pandas\\core\\strings\\accessor.py", + "ast_data": "FunctionDef name:extractall arg:self arg:pat arg:flags arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "select_one_layer_lstm_function", + "source_code": "def select_one_layer_lstm_function(input, hx, params):\n\n def use_mkldnn(input, hx, params):\n if not torch._C._get_mkldnn_enabled():\n return False\n tensors = [input] + list(hx) + list(chain.from_iterable(params))\n devices = {t.device for t in tensors}\n if len(devices) != 1:\n return False\n device = devices.pop()\n if device != torch.device('cpu'):\n return False\n dtypes = {t.dtype for t in tensors}\n for dtype in dtypes:\n if dtype not in [torch.float, torch.bfloat16]:\n return False\n if input.requires_grad:\n return False\n has_projections = hx[0].size(2) != hx[1].size(2)\n if has_projections:\n return False\n return True\n if use_mkldnn(input, hx, params):\n return mkldnn_one_layer_lstm\n else:\n return one_layer_lstm", + "docstring": "Check whether we could use decompose lstm with mkldnn_rnn_layer. All the below conditions need to be met: * `` to LSTM * params: the weight and bias tensors of LSTM", + "type": "function", + "file_path": "pytorch\\torch\\_decomp\\decompositions.py", + "ast_data": "FunctionDef name:select_one_layer_lstm_function arg:input arg:hx arg:params arguments arg arg arg FunctionDef name:use_mkldnn arg:input arg:hx arg:params arguments arg arg arg If Call Return return:yes Assign Call Call Call Assign If Compare Call Return return:yes Assign Call If Compare Call Return return:yes Assign For If Compare Return return:yes If Return return:yes Assign Compare Call Call If Return return:yes Return return:yes If Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "convert", + "source_code": "def convert(module, mapping=None, inplace=False, remove_qconfig=True, is_reference=False, convert_custom_config_dict=None, use_precomputed_fake_quant=False):\n torch._C._log_api_usage_once('quantization_api.quantize.convert')\n if not inplace:\n module = copy.deepcopy(module)\n _convert(module, mapping, inplace=True, is_reference=is_reference, convert_custom_config_dict=convert_custom_config_dict, use_precomputed_fake_quant=use_precomputed_fake_quant)\n if remove_qconfig:\n _remove_qconfig(module)\n return module", + "docstring": "Converts submodules in input module to a different module according to by calling method on the target module class. And remove qconfig at the end if remove_qconfig is set to True. Args: : prepared and calibrated module : a dictionary that maps from source module type to target module type, can be overwritten to allow swapping user defined Modules : carry out model transformations in-place, the original module is mutated : custom configuration dictionary for convert function : a flag to enable use of precomputed fake quant .. 
code-block:: python # Example of convert_custom_config_dict: convert_custom_config_dict = { # user will manually define the corresponding quantized # module class which has a from_observed class method that converts # observed custom module to quantized custom module \"observed_to_quantized_custom_module_class\": { ObservedCustomModule: QuantizedCustomModule } }", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py", + "ast_data": "FunctionDef name:convert arg:module arg:mapping arg:inplace arg:remove_qconfig arg:is_reference arg:convert_custom_config_dict arg:use_precomputed_fake_quant arguments arg arg arg arg arg arg arg Call If Assign Call Call If Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_view", + "source_code": "def _get_view(self):\n return {'xlim': self.get_xlim(), 'autoscalex_on': self.get_autoscalex_on(), 'ylim': self.get_ylim(), 'autoscaley_on': self.get_autoscaley_on()}", + "docstring": "Save information required to reproduce the current view. This method is called before a view is changed, such as during a pan or zoom initiated by the user. It returns an opaque object that describes the current view, in a format compatible with :meth:. The default implementation saves the view limits and autoscaling state. Subclasses may override this as needed, as long as :meth: is also adjusted accordingly.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:_get_view arg:self arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "pytorch", + "name": "combine_partitions_based_on_size", + "source_code": "def combine_partitions_based_on_size(partitions: list[Partition], available_mem_bytes: int) -> None:\n find_combination = True\n while find_combination:\n sorted_partitions = sorted(partitions, key=lambda p: p.used_mem_bytes)\n get_bfs_level_partition(self.partitions)\n find_combination, partitions = find_partition_to_combine_based_on_size(sorted_partitions, available_mem_bytes, partitions)\n return", + "docstring": "Combining small partitions together to keep as less partitions as possible. Here is an example of the algorithm to do this: Assume some partitions, we first sort them based on partition used memory size. [(partition_4, 1), (partition_3, 1), (partition_2, 2), (partition_1, 7), (partition_0, 9)] The available memory is 10. step 1: self.find_partition_to_combine_based_on_size() First, mark bfs level for each partition Second, look the smallest partition, partition_4: 10 - 1 = 9 It means any partition has a used memory equal or less than 9 could combine this partition We go from the largest and selection partition_0. Check the bfs level for two partitions, if the level difference is less than 2, it can be combined. 
step 2: repeat step 1 until no partitions can be combined", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", + "ast_data": "FunctionDef name:combine_partitions_based_on_size arg:partitions arg:available_mem_bytes arguments arg arg Assign While Assign Call arguments arg Call Assign Call Return return:no" + }, + { + "library": "pandas", + "name": "_getitem_tuple_same_dim", + "source_code": "@final\ndef _getitem_tuple_same_dim(self, tup: tuple):\n retval = self.obj\n start_val = self.ndim - len(tup) + 1\n for i, key in enumerate(reversed(tup)):\n i = self.ndim - i - start_val\n if com.is_null_slice(key):\n continue\n retval = getattr(retval, self.name)._getitem_axis(key, axis=i)\n assert retval.ndim == self.ndim\n if retval is self.obj:\n retval = retval.copy(deep=False)\n return retval", + "docstring": "Index with indexers that should return an object of the same dimension as self.obj. This is only called after a failed call to _getitem_lowerdim.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:_getitem_tuple_same_dim arg:self arg:tup arguments arg arg Assign Assign Call For Call Call Assign If Call Assign Call Call Compare If Compare Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "compute_jac_scale", + "source_code": "def compute_jac_scale(J, scale_inv_old=None):\n if issparse(J):\n scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel() ** 0.5\n else:\n scale_inv = np.sum(J ** 2, axis=0) ** 0.5\n if scale_inv_old is None:\n scale_inv[scale_inv == 0] = 1\n else:\n scale_inv = np.maximum(scale_inv, scale_inv_old)\n return (1 / scale_inv, scale_inv)", + "docstring": "Compute variables scale based on the Jacobian matrix.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:compute_jac_scale arg:J arg:scale_inv_old arguments arg arg If Call Assign Call Call Call Call Assign Call If Compare Assign Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "CompiledFxGraphConstantsWithGm", + "source_code": "class CompiledFxGraphConstantsWithGm(CompiledFxGraphConstants):\n\n def __init__(self, gm: torch.fx.GraphModule) -> None:\n self.gm = gm\n\n def unwrap(self, g: CompiledFxGraph) -> dict[str, torch.Tensor]:\n frozen_params = {name: getattr(self.gm, orig_name) for name, orig_name in g.frozen_param_names.items()}\n constants = g.constants or {}\n return {**constants, **frozen_params}", + "docstring": "This version of CompiledFxGraphConstants, instead of grabbing constants directly saved on CompiledFxGraphs, will just grab their names. Then, it takes a second GraphModule to grab the corresponding constant values out of. 
This is necessary for supporting freezing in FxGraphCache.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\output_code.py", + "ast_data": "ClassDef name:CompiledFxGraphConstantsWithGm FunctionDef name:__init__ arg:self arg:gm arguments arg arg Assign FunctionDef name:unwrap arg:self arg:g arguments arg arg Assign Call Call Assign BoolOp Return return:yes" + }, + { + "library": "tensorflow", + "name": "ConditionalExpressionTransformer", + "source_code": "class ConditionalExpressionTransformer(converter.Base):\n\n def visit_IfExp(self, node):\n template = '\\n ag__.if_exp(\\n test,\\n lambda: true_expr,\\n lambda: false_expr,\\n expr_repr)\\n '\n expr_repr = parser.unparse(node.test, include_encoding_marker=False).strip()\n return templates.replace_as_expression(template, test=node.test, true_expr=node.body, false_expr=node.orelse, expr_repr=gast.Constant(expr_repr, kind=None))", + "docstring": "Converts conditional expressions to functional form.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\conditional_expressions.py", + "ast_data": "ClassDef name:ConditionalExpressionTransformer FunctionDef name:visit_IfExp arg:self arg:node arguments arg arg Assign Assign Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "reduce_join_v2", + "source_code": "@tf_export('strings.reduce_join', v1=[])\n@dispatch.add_dispatch_support\ndef reduce_join_v2(inputs, axis=None, keepdims=False, separator='', name=None):\n with ops.name_scope(None, 'ReduceJoin', [inputs, axis]):\n inputs_t = ops.convert_to_tensor(inputs)\n axis = _reduce_join_reduction_dims(inputs_t, axis)\n return gen_string_ops.reduce_join(inputs=inputs_t, reduction_indices=axis, keep_dims=keepdims, separator=separator, name=name)", + "docstring": "Joins all strings into a single string, or joins along an axis. This is the reduction operation for the elementwise op. >>> tf.strings.reduce_join([['abc','123'], ... ['def','456']]).numpy() b'abc123def456' >>> tf.strings.reduce_join([['abc','123'], ... ['def','456']], axis=-1).numpy() array([b'abc123', b'def456'], dtype=object) >>> tf.strings.reduce_join([['abc','123'], ... ['def','456']], ... axis=-1, ... separator=\" \").numpy() array([b'abc 123', b'def 456'], dtype=object) Args: inputs: A tensor. axis: Which axis to join along. The default behavior is to join all elements, producing a scalar. keepdims: If true, retains reduced dimensions with length 1. separator: a string added between each string being joined. name: A name for the operation (optional). 
Returns: A tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py", + "ast_data": "FunctionDef name:reduce_join_v2 arg:inputs arg:axis arg:keepdims arg:separator arg:name arguments arg arg arg arg arg With Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_figwidth", + "source_code": "def get_figwidth(self):\n return self.bbox_inches.width", + "docstring": "Return the figure width in inches.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:get_figwidth arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_randomized_eigsh", + "source_code": "def _randomized_eigsh(M, n_components, *, n_oversamples=10, n_iter='auto', power_iteration_normalizer='auto', selection='module', random_state=None):\n if selection == 'value':\n raise NotImplementedError()\n elif selection == 'module':\n U, S, Vt = randomized_svd(M, n_components=n_components, n_oversamples=n_oversamples, n_iter=n_iter, power_iteration_normalizer=power_iteration_normalizer, flip_sign=False, random_state=random_state)\n eigvecs = U[:, :n_components]\n eigvals = S[:n_components]\n diag_VtU = np.einsum('ji,ij->j', Vt[:n_components, :], U[:, :n_components])\n signs = np.sign(diag_VtU)\n eigvals = eigvals * signs\n else:\n raise ValueError('Invalid `selection`: %r' % selection)\n return (eigvals, eigvecs)", + "docstring": "Computes a truncated eigendecomposition using randomized methods This method solves the fixed-rank approximation problem described in the Halko et al paper. The choice of which components to select can be tuned with the parameter. .. versionadded:: 0.24 Parameters ---------- M : ndarray or sparse matrix Matrix to decompose, it should be real symmetric square or complex hermitian n_components : int Number of eigenvalues and vectors to extract. n_oversamples : int, default=10 Additional number of random vectors to sample the range of M so as to ensure proper conditioning. The total number of random vectors used to find the range of M is n_components + n_oversamples. Smaller number can improve speed but can negatively impact the quality of approximation of eigenvectors and eigenvalues. Users might wish to increase this parameter up to where k is the effective rank, for large matrices, noisy problems, matrices with slowly decaying spectrums, or to increase precision accuracy. See Halko et al (pages 5, 23 and 26). n_iter : int or 'auto', default='auto' Number of power iterations. It can be used to deal with very noisy problems. When 'auto', it is set to 4, unless is small (n_iter Halko, et al. (2009)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\extmath.py", + "ast_data": "FunctionDef name:_randomized_eigsh arg:M arg:n_components arguments arg arg arg arg arg arg arg If Compare Raise Call If Compare Assign Call Assign Assign Assign Call Assign Call Assign Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "kl_divergence", + "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). 
You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\n@tf_export(v1=['distributions.kl_divergence'])\ndef kl_divergence(distribution_a, distribution_b, allow_nan_stats=True, name=None):\n kl_fn = _registered_kl(type(distribution_a), type(distribution_b))\n if kl_fn is None:\n raise NotImplementedError('No KL(distribution_a || distribution_b) registered for distribution_a type %s and distribution_b type %s' % (type(distribution_a).__name__, type(distribution_b).__name__))\n with ops.name_scope('KullbackLeibler'):\n kl_t = kl_fn(distribution_a, distribution_b, name=name)\n if allow_nan_stats:\n return kl_t\n kl_t = array_ops.identity(kl_t, name='kl')\n with ops.control_dependencies([control_flow_assert.Assert(math_ops.logical_not(math_ops.reduce_any(math_ops.is_nan(kl_t))), ['KL calculation between %s and %s returned NaN values (and was called with allow_nan_stats=False). Values:' % (distribution_a.name, distribution_b.name), kl_t])]):\n return array_ops.identity(kl_t, name='checked_kl')", + "docstring": "Get the KL-divergence KL(distribution_a || distribution_b). If there is no KL method registered specifically for and , then the class hierarchies of these types are searched. If one KL method is registered between any pairs of classes in these two parent hierarchies, it is used. If more than one such registered method exists, the method whose registered classes have the shortest sum MRO paths to the input types is used. If more than one such shortest path exists, the first method identified in the search is used (favoring a shorter MRO distance to ). Args: distribution_a: The first distribution. distribution_b: The second distribution. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. Returns: A Tensor with the batchwise KL-divergence between and . Raises: NotImplementedError: If no KL method is defined for distribution types of and .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\kullback_leibler.py", + "ast_data": "FunctionDef name:kl_divergence arg:distribution_a arg:distribution_b arg:allow_nan_stats arg:name arguments arg arg arg arg Assign Call Call Call If Compare Raise Call Call Call With Call Assign Call If Return return:yes Assign Call With Call Call Call Call Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "merge", + "source_code": "def merge(self, options):\n return options_lib.merge_options(self, options)", + "docstring": "Merges itself with the given . If this object and the to merge set an option differently, a warning is generated and this object's value is updated with the object's value. Args: options: The to merge with. 
Returns: New object which is the result of merging self with the input .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py", + "ast_data": "FunctionDef name:merge arg:self arg:options arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, klass, field, load_func=None):\n self._klass = klass\n self._load_func = load_func or klass\n super().__init__(field)", + "docstring": "Initialize on the given Geometry or Raster class (not an instance) and the corresponding field.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\models\\proxy.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:klass arg:field arg:load_func arguments arg arg arg arg Assign Assign BoolOp Call Call" + }, + { + "library": "tensorflow", + "name": "_extend_op", + "source_code": "def _extend_op(values, leaf_op, empty_st_op=None):\n if not isinstance(values, Sequence):\n raise ValueError('Expected a list')\n if not values:\n raise ValueError('List cannot be empty')\n if empty_st_op is None:\n empty_st_op = empty_st_op_like_zeros(leaf_op)\n value = values[0]\n if isinstance(value, StructuredTensor):\n empty_result = empty_st_op(values)\n if not value.field_names():\n return empty_result\n new_fields = {}\n for k in value.field_names():\n new_fields[k] = _extend_op([v.field_value(k) for v in values], leaf_op, empty_st_op)\n return StructuredTensor.from_fields(new_fields, shape=empty_result.shape)\n else:\n return leaf_op(values)", + "docstring": "Extend an op from RaggedTensor and Tensor to StructuredTensor. Visits all children of the structured tensor, and children of children, applying leaf_op whenever it reaches a leaf, and empty_st_op whenever it reaches an internal node without children. Args: values: a list of structured tensors, ragged tensors, or tensors. All must have the same type. If they are structured tensors, they must have the same paths. leaf_op: an op for handling non-structured tensor. empty_st_op: op to create a structured tensor without fields. Returns: the result of the extended op (a StructuredTensor, RaggedTensor, or Tensor) Raises: ValueError: If values is not a Sequence or is empty.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:_extend_op arg:values arg:leaf_op arg:empty_st_op arguments arg arg arg If Call Raise Call If Raise Call If Compare Assign Call Assign If Call Assign Call If Call Return return:yes Assign For Call Assign Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, *, use_overline=False, one_half='\\\\frac{1}{2}', minor=False, minor_threshold=25, minor_number=6):\n self._use_overline = use_overline\n self._one_half = one_half\n self._minor = minor\n self._labelled = set()\n self._minor_threshold = minor_threshold\n self._minor_number = minor_number", + "docstring": "Parameters ---------- use_overline : bool, default: False If x > 1/2, with x = 1 - v, indicate if x should be displayed as $\\overline{v}$. The default is to display $1 - v$. one_half : str, default: r\"\\\\frac{1}{2}\" The string used to represent 1/2. minor : bool, default: False Indicate if the formatter is formatting minor ticks or not. Basically minor ticks are not labelled, except when only few ticks are provided, ticks with most space with neighbor ticks are labelled. 
See other parameters to change the default behavior. minor_threshold : int, default: 25 Maximum number of locs for labelling some minor ticks. This parameter have no effect if minor is False. minor_number : int, default: 6 Number of ticks which are labelled when the number of ticks is below the threshold.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg Assign Assign Assign Assign Call Assign Assign" + }, + { + "library": "django", + "name": "CreateView", + "source_code": "class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):\n template_name_suffix = '_form'", + "docstring": "View for creating a new object, with a response rendered by a template.", + "type": "class", + "file_path": "django\\django\\views\\generic\\edit.py", + "ast_data": "ClassDef name:CreateView Assign" + }, + { + "library": "pytorch", + "name": "is_available", + "source_code": "def is_available() -> bool:\n acc = current_accelerator()\n if acc is None:\n return False\n mod = torch.get_device_module(acc)\n return mod.is_available()", + "docstring": "Check if the current accelerator is available at runtime: it was build, all the required drivers are available and at least one device is visible. See :ref: for details. Returns: bool: A boolean indicating if there is an available :ref:. .. note:: This API delegates to the device-specific version of . On CUDA, when the environment variable `multiprocessing-poison-fork-note`. Example:: >>> assert torch.accelerator.is_available() \"No available accelerators detected.\"", + "type": "function", + "file_path": "pytorch\\torch\\accelerator\\__init__.py", + "ast_data": "FunctionDef name:is_available arguments Assign Call If Compare Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "incr_version", + "source_code": "def incr_version(self, key, delta=1, version=None):\n if version is None:\n version = self.version\n value = self.get(key, self._missing_key, version=version)\n if value is self._missing_key:\n raise ValueError(\"Key '%s' not found\" % key)\n self.set(key, value, version=version + delta)\n self.delete(key, version=version)\n return version + delta", + "docstring": "Add delta to the cache version for the supplied key. 
Return the new version.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:incr_version arg:self arg:key arg:delta arg:version arguments arg arg arg arg If Compare Assign Assign Call If Compare Raise Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_device", + "source_code": "@classmethod\ndef from_device(cls, device: str) -> 'Layout':\n return cls.from_single_device_mesh(Mesh.from_device(device))", + "docstring": "Constructs a single device layout from a single device mesh.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:from_device arg:cls arg:device arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "new", + "source_code": "@staticmethod\ndef new(node, function, enclosing_graph):\n if node.op in ['VariableV2', 'VarHandleOp', 'Placeholder']:\n return _VarHandle(node, function, enclosing_graph)\n elif node.op == 'Case':\n return _Case(node, function, enclosing_graph)\n elif node.op == 'Merge':\n return _Merge(node, function, enclosing_graph)\n elif node.op == 'PartitionedCall':\n return _PartitionedCall(node, function, enclosing_graph)\n elif node.op == 'StatefulPartitionedCall':\n return _PartitionedCall(node, function, enclosing_graph)\n elif node.op == 'ReadVariableOp':\n return _ReadVariable(node, function, enclosing_graph)\n elif node.op == 'ResourceGather':\n return _ResourceGather(node, function, enclosing_graph)\n elif node.op == 'ResourceGatherNd':\n return _ResourceGatherNd(node, function, enclosing_graph)\n elif node.op in ['If', 'StatelessIf']:\n return _If(node, function, enclosing_graph)\n elif node.op in ['While', 'StatelessWhile']:\n return _While(node, function, enclosing_graph)\n elif node.op in ['Enter', 'Exit', 'Identity', 'NextIteration', 'Switch', '_SwitchN']:\n return _Intermediate(node, function, enclosing_graph)\n else:\n return _Node(node, function, enclosing_graph)", + "docstring": "Creates a new _Node base on its operation type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "FunctionDef name:new arg:node arg:function arg:enclosing_graph arguments arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "qualify_name", + "source_code": "def qualify_name(self, name: str) -> str:\n if self.name is not None:\n return f'{self.name}_{name}'\n return name", + "docstring": "Prepend the given name with the graph name if any.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\graph.py", + "ast_data": "FunctionDef name:qualify_name arg:self arg:name arguments arg arg If Compare Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "def fit(self, X, y=None):\n self.fit_predict(X, y)\n return self", + "docstring": "Estimate model parameters with the EM algorithm. The method fits the model `` is ignored and a single initialization is performed upon the first call. Upon consecutive calls, training starts where it left off. 
Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object The fitted mixture.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes" + }, + { + "library": "cryptography", + "name": "add_revoked_certificate", + "source_code": "def add_revoked_certificate(self, revoked_certificate: RevokedCertificate) -> CertificateRevocationListBuilder:\n if not isinstance(revoked_certificate, RevokedCertificate):\n raise TypeError('Must be an instance of RevokedCertificate')\n return CertificateRevocationListBuilder(self._issuer_name, self._last_update, self._next_update, self._extensions, [*self._revoked_certificates, revoked_certificate])", + "docstring": "Adds a revoked certificate to the CRL.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\x509\\base.py", + "ast_data": "FunctionDef name:add_revoked_certificate arg:self arg:revoked_certificate arguments arg arg If Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_step_for_v2", + "source_code": "def _get_step_for_v2():\n step = _summary_ops_v2.get_step()\n if step is not None:\n return step\n return _training_util.get_global_step()", + "docstring": "Get step for v2 summary invocation in v1. In order to invoke v2 op in , global step needs to be set for the v2 summary writer. Returns: The step set by or , or None is no step has been set.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\summary\\summary.py", + "ast_data": "FunctionDef name:_get_step_for_v2 arguments Assign Call If Compare Return return:yes Return return:yes Call" + }, + { + "library": "kornia", + "name": "transform", + "source_code": "@classmethod\ndef transform(cls, input: Keypoints, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Keypoints:\n if extra_args is None:\n extra_args = {}\n _input = input.clone()\n if isinstance(module, (K.GeometricAugmentationBase2D,)):\n _input = module.transform_keypoints(_input, cls.get_instance_module_param(param), module.flags, transform=module.transform_matrix, **extra_args)\n elif isinstance(module, (K.GeometricAugmentationBase3D,)):\n raise NotImplementedError('The support for 3d keypoint operations are not yet supported. You are welcome to file a PR in our repo.')\n elif isinstance(module, K.ImageSequential) and (not module.is_intensity_only()):\n _input = module.transform_keypoints(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, K.container.ImageSequentialBase):\n _input = module.transform_keypoints(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, (K.auto.operations.OperationBase,)):\n return KeypointSequentialOps.transform(input, module=module.op, param=param, extra_args=extra_args)\n return _input", + "docstring": "Apply a transformation with respect to the parameters. Args: input: the input tensor, (B, N, 4, 2) or (B, 4, 2). module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. 
extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\ops.py", + "ast_data": "FunctionDef name:transform arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Compare Assign Assign Call If Call Assign Call Call If Call Raise Call If BoolOp Call Call Assign Call Call If Call Assign Call Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "pandas", + "name": "codes", + "source_code": "@property\ndef codes(self) -> Series:\n from pandas import Series\n return Series(self._parent.codes, index=self._index)", + "docstring": "Return Series of codes as well as the index. See Also -------- Series.cat.categories : Return the categories of this categorical. Series.cat.as_ordered : Set the Categorical to be ordered. Series.cat.as_unordered : Set the Categorical to be unordered. Examples -------- >>> raw_cate = pd.Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\"]) >>> ser = pd.Series(raw_cate) >>> ser.cat.codes 0 0 1 1 2 -1 3 0 dtype: int8", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:codes arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_fragment_2_1", + "source_code": "def _fragment_2_1(X, T, s):\n n = X.shape[0]\n diag_T = np.ravel(T.diagonal().copy())\n scale = 2 ** (-s)\n exp_diag = np.exp(scale * diag_T)\n for k in range(n):\n X[k, k] = exp_diag[k]\n for i in range(s - 1, -1, -1):\n X = X.dot(X)\n scale = 2 ** (-i)\n exp_diag = np.exp(scale * diag_T)\n for k in range(n):\n X[k, k] = exp_diag[k]\n for k in range(n - 1):\n lam_1 = scale * diag_T[k]\n lam_2 = scale * diag_T[k + 1]\n t_12 = scale * T[k, k + 1]\n value = _eq_10_42(lam_1, lam_2, t_12)\n X[k, k + 1] = value\n return X", + "docstring": "A helper function for expm_2009. Notes ----- The argument X is modified in-place, but this modification is not the same as the returned value of the function. This function also takes pains to do things in ways that are compatible with sparse arrays, for example by avoiding fancy indexing and by using methods of the matrices whenever possible instead of using functions of the numpy or scipy libraries themselves.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py", + "ast_data": "FunctionDef name:_fragment_2_1 arg:X arg:T arg:s arguments arg arg arg Assign Assign Call Call Call Assign Assign Call For Call Assign For Call Assign Call Assign Assign Call For Call Assign For Call Assign Assign Assign Assign Call Assign Return return:yes" + }, + { + "library": "numpy", + "name": "isspace", + "source_code": "def isspace(self):\n return isspace(self)", + "docstring": "Returns true for each element if there are only whitespace characters in the string and there is at least one character, false otherwise. 
See Also -------- char.isspace", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:isspace arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "convert", + "source_code": "def convert(self, module: nn.Module, mapping: Optional[dict[type[nn.Module], type[nn.Module]]]=None, inplace: bool=False, parameterization: type[nn.Module]=FakeSparsity):\n if mapping is None:\n raise NotImplementedError('Need to auto generate mapping ')\n if not inplace:\n module = copy.deepcopy(module)\n reassign = {}\n for name, mod in module.named_children():\n if module_contains_param(mod, parameterization) and type_before_parametrizations(mod) in mapping:\n reassign[name] = swap_module(mod, mapping)\n else:\n reassign[name] = self.convert(mod, mapping=mapping, inplace=True, parameterization=parameterization)\n for key, value in reassign.items():\n module._modules[key] = value\n return module", + "docstring": "Converts submodules in input module to a different module according to by calling method on the target module class Args: module: input module mapping: a dictionary that maps from source module type to target module type, can be overwritten to allow swapping user defined Modules inplace: carry out model transformations in-place, the original module is mutated", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\base_sparsifier.py", + "ast_data": "FunctionDef name:convert arg:self arg:module arg:mapping arg:inplace arg:parameterization arguments arg arg arg arg arg If Compare Raise Call If Assign Call Assign For Call If BoolOp Call Compare Call Assign Call Assign Call For Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ragged_tensor_binary_crossentropy", + "source_code": "@dispatch.dispatch_for_types(binary_crossentropy, ragged_tensor.RaggedTensor)\ndef _ragged_tensor_binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0, axis=-1):\n fn = functools.partial(binary_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)\n return _ragged_tensor_apply_loss(fn, y_true, y_pred)", + "docstring": "Implements support for handling RaggedTensors. Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether is expected to be a logits tensor. By default, we assume that encodes a probability distribution. label_smoothing: Float in [0, 1]. If > then smooth the labels. For example, if , use for non-target labels and for target labels. axis: Axis along which to compute crossentropy. Returns: Binary crossentropy loss value. Expected shape: (batch, sequence_len) with sequence_len being variable per batch. Return shape: (batch,); returns the per batch mean of the loss values. When used by BinaryCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over the number of batches.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:_ragged_tensor_binary_crossentropy arg:y_true arg:y_pred arg:from_logits arg:label_smoothing arg:axis arguments arg arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "exists_nonce", + "source_code": "def exists_nonce(self, nonce, request):\n raise NotImplementedError()", + "docstring": "Check if the given nonce is existing in your database. 
Developers MUST implement this method in subclass, e.g.:: def exists_nonce(self, nonce, request): exists = AuthorizationCode.query.filter_by( client_id=request.payload.client_id, nonce=nonce ).first() return bool(exists) :param nonce: A string of \"nonce\" parameter in request :param request: OAuth2Request instance :return: Boolean", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\grants\\code.py", + "ast_data": "FunctionDef name:exists_nonce arg:self arg:nonce arg:request arguments arg arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "_apply_shrinkage", + "source_code": "def _apply_shrinkage(self):\n for leaf in self.finalized_leaves:\n leaf.value *= self.shrinkage", + "docstring": "Multiply leaves values by shrinkage parameter. This must be done at the very end of the growing process. If this were done during the growing process e.g. in finalize_leaf(), then a leaf would be shrunk but its sibling would potentially not be (if it's a non-leaf), which would lead to a wrong computation of the 'middle' value needed to enforce the monotonic constraints.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py", + "ast_data": "FunctionDef name:_apply_shrinkage arg:self arguments arg For" + }, + { + "library": "cherrypy", + "name": "allow", + "source_code": "def allow(methods=None, debug=False):\n if not isinstance(methods, (tuple, list)):\n methods = [methods]\n methods = [m.upper() for m in methods if m]\n if not methods:\n methods = ['GET', 'HEAD']\n elif 'GET' in methods and 'HEAD' not in methods:\n methods.append('HEAD')\n cherrypy.response.headers['Allow'] = ', '.join(methods)\n if cherrypy.request.method not in methods:\n if debug:\n cherrypy.log('request.method %r not in methods %r' % (cherrypy.request.method, methods), 'TOOLS.ALLOW')\n raise cherrypy.HTTPError(405)\n elif debug:\n cherrypy.log('request.method %r in methods %r' % (cherrypy.request.method, methods), 'TOOLS.ALLOW')", + "docstring": "Raise 405 if request.method not in methods (default ['GET', 'HEAD']). The given methods are case-insensitive, and may be in any order. If only one method is allowed, you may supply a single string; if more than one, supply a list of strings. 
Regardless of whether the current method is allowed or not, this also emits an 'Allow' response header, containing the given methods.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", + "ast_data": "FunctionDef name:allow arg:methods arg:debug arguments arg arg If Call Assign Assign Call If Assign If BoolOp Compare Compare Call Assign Call If Compare If Call Raise Call If Call" + }, + { + "library": "pytorch", + "name": "_reduce_shard_tensor", + "source_code": "def _reduce_shard_tensor(self, tensor: torch.Tensor, mesh: DeviceMesh, reduce_op: str, mesh_dim: int) -> torch.Tensor:\n my_coordinate = mesh.get_coordinate()\n num_chunks = mesh.size(mesh_dim=mesh_dim)\n if my_coordinate is None:\n return tensor\n is_padded = tensor.size(self.dim) % num_chunks != 0\n if is_padded:\n scattered_list, pad_sizes = self._split_tensor(tensor, num_chunks, with_padding=True, contiguous=True)\n tensor = torch.cat(scattered_list, dim=self.dim)\n elif not tensor.is_contiguous():\n tensor = tensor.contiguous()\n output = funcol.reduce_scatter_tensor(tensor, reduce_op, scatter_dim=self.dim, group=(mesh, mesh_dim))\n if is_padded:\n output = unpad_tensor(output, self.dim, pad_sizes[my_coordinate[mesh_dim]])\n return output", + "docstring": "reduce and scatter a tensor on a mesh dimension", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py", + "ast_data": "FunctionDef name:_reduce_shard_tensor arg:self arg:tensor arg:mesh arg:reduce_op arg:mesh_dim arguments arg arg arg arg arg Assign Call Assign Call If Compare Return return:yes Assign Compare Call If Assign Call Assign Call If Call Assign Call Assign Call If Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return self._delayed_rewrite_functions.forward().name", + "docstring": "name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_is_safe_to_split", + "source_code": "def _is_safe_to_split() -> bool:\n return False if _get_default_group().bound_device_id is None else True", + "docstring": "Checks if it is safe to split the any process group in the world. This is only safe if the default pg has a bound device id, otherwise users must be aware that a pg is only splittable after the first collective is issued.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:_is_safe_to_split arguments Return return:yes Compare Call" + }, + { + "library": "matplotlib", + "name": "_request_autoscale_view", + "source_code": "def _request_autoscale_view(self, axis='all', tight=None):\n axis_names = _api.check_getitem({**{k: [k] for k in self._axis_names}, 'all': self._axis_names}, axis=axis)\n for name in axis_names:\n self._stale_viewlims[name] = True\n if tight is not None:\n self._tight = tight", + "docstring": "Mark a single axis, or all of them, as stale wrt. autoscaling. No computation is performed until the next autoscaling; thus, separate calls to control individual axises incur negligible performance cost. Parameters ---------- axis : str, default: \"all\" Either an element of ``, or \"all\". 
tight : bool or None, default: None", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:_request_autoscale_view arg:self arg:axis arg:tight arguments arg arg arg Assign Call For Assign If Compare Assign" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "def predict_proba(self, raw_prediction):\n if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:\n raw_prediction = raw_prediction.squeeze(1)\n proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)\n proba[:, 1] = self.link.inverse(raw_prediction)\n proba[:, 0] = 1 - proba[:, 1]\n return proba", + "docstring": "Predict probabilities. Parameters ---------- raw_prediction : array of shape (n_samples,) or (n_samples, 1) Raw prediction values (in link space). Returns ------- proba : array of shape (n_samples, 2) Element-wise class probabilities.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:raw_prediction arguments arg arg If BoolOp Compare Compare Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "forward", + "source_code": "def forward(self, *args, **kwargs):\n return self.module(*args, **kwargs)", + "docstring": "Forward pass.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\swa_utils.py", + "ast_data": "FunctionDef name:forward arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "add_residual", + "source_code": "def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):\n if scaling_vector is None:\n x_flat = x.flatten(1)\n residual = residual.flatten(1)\n x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)\n else:\n x_plus_residual = scaled_index_add(x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor)\n return x_plus_residual", + "docstring": "Add residual connections.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\block.py", + "ast_data": "FunctionDef name:add_residual arg:x arg:brange arg:residual arg:residual_scale_factor arg:scaling_vector arguments arg arg arg arg arg If Compare Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_make_request", + "source_code": "def _make_request(self, verb: str, endpoint: str, **kwargs: dict[str, Any]) -> requests.Response:\n res = self._session.request(verb, urllib.parse.urljoin('https://api.github.com', endpoint), json=kwargs)\n res.raise_for_status()\n return res.json()", + "docstring": "Helper method to make a request and raise an HTTPError if one occurred. Arguments: verb: The HTTP verb to use endpoint: The endpoint to make the request to **kwargs: The json that will be sent as the body of the request. Returns: a requests.Response object containing the response from the API. 
Raises: requests.exceptions.HTTPError", + "type": "method", + "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\github_api.py", + "ast_data": "FunctionDef name:_make_request arg:self arg:verb arg:endpoint arguments arg arg arg arg Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "row_splits_to_segment_ids", + "source_code": "@tf_export('ragged.row_splits_to_segment_ids')\n@dispatch.add_dispatch_support\ndef row_splits_to_segment_ids(splits, name=None, out_type=None):\n with ops.name_scope(name, 'RaggedSplitsToSegmentIds', [splits]) as name:\n splits = ops.convert_to_tensor(splits, name='splits', preferred_dtype=dtypes.int64)\n if splits.dtype not in (dtypes.int32, dtypes.int64):\n raise ValueError('splits must have dtype int32 or int64')\n splits.shape.assert_has_rank(1)\n if tensor_shape.dimension_value(splits.shape[0]) == 0:\n raise ValueError('Invalid row_splits: []')\n if out_type is None:\n out_type = splits.dtype\n else:\n out_type = dtypes.as_dtype(out_type)\n row_lengths = splits[1:] - splits[:-1]\n nrows = array_ops.shape(splits, out_type=out_type)[-1] - 1\n indices = math_ops.range(nrows)\n return ragged_util.repeat(indices, repeats=row_lengths, axis=0)", + "docstring": "Generates the segmentation corresponding to a RaggedTensor . Returns an integer vector , where if splits[0]splits.dtypetf.int64splitsshape=[splits[-1]]splits` is invalid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\segment_id_ops.py", + "ast_data": "FunctionDef name:row_splits_to_segment_ids arg:splits arg:name arg:out_type arguments arg arg arg With Call Assign Call If Compare Raise Call Call If Compare Call Raise Call If Compare Assign Assign Call Assign Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None):\n self._fit_transform(X)\n return self", + "docstring": "Compute the embedding vectors for data X. Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, sparse matrix, precomputed tree, or NearestNeighbors object. y : Ignored Not used, present for API consistency by convention. 
Returns ------- self : object Returns a fitted instance of self.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\manifold\\_isomap.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "new_cond_branch", + "source_code": "def new_cond_branch(self, section_id):\n assert section_id in self.cond_leaves\n if section_id in self.cond_entry:\n self.cond_leaves[section_id].append(self.leaves)\n self.leaves = self.cond_entry[section_id]\n else:\n self.cond_entry[section_id] = self.leaves", + "docstring": "Begins a new branch in a cond section.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:new_cond_branch arg:self arg:section_id arguments arg arg Compare If Compare Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "_get_main_op_tensor", + "source_code": "def _get_main_op_tensor(meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY):\n collection_def = meta_graph_def_to_load.collection_def\n init_op = None\n if init_op_key in collection_def:\n init_op_list = collection_def[init_op_key].node_list.value\n if len(init_op_list) != 1:\n raise RuntimeError(f'Expected exactly one SavedModel init op. Found {len(init_op_list)}: {init_op_list}.')\n init_op = ops.get_collection(init_op_key)[0]\n return init_op", + "docstring": "Gets the main op tensor, if one exists. Args: meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded. init_op_key: name of the collection to check; should be one of MAIN_OP_KEY or the deprecated LEGACY_INIT_OP_KEY Returns: The main op tensor, if it exists and otherwise. Raises: RuntimeError: If the collection def corresponding to the main op key has other than exactly one tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py", + "ast_data": "FunctionDef name:_get_main_op_tensor arg:meta_graph_def_to_load arg:init_op_key arguments arg arg Assign Assign If Compare Assign If Compare Call Raise Call Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "_embed_ptpython_shell", + "source_code": "def _embed_ptpython_shell(namespace: dict[str, Any]={}, banner: str='') -> EmbedFuncT:\n import ptpython.repl\n\n @wraps(_embed_ptpython_shell)\n def wrapper(namespace: dict[str, Any]=namespace, banner: str='') -> None:\n print(banner)\n ptpython.repl.embed(locals=namespace)\n return wrapper", + "docstring": "Start a ptpython shell", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\console.py", + "ast_data": "FunctionDef name:_embed_ptpython_shell arg:namespace arg:banner arguments arg arg FunctionDef name:wrapper arg:namespace arg:banner arguments arg arg Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_set_optimization_parameters", + "source_code": "def _set_optimization_parameters(self, parameters: optimization_parameters_pb2.OptimizationParameters):\n if self.use_gradient_accumulation:\n parameters.gradient_accumulation_status = optimization_parameters_pb2.GradientAccumulationStatus.ENABLED\n else:\n parameters.gradient_accumulation_status = optimization_parameters_pb2.GradientAccumulationStatus.DISABLED\n if self.clip_weight_min is not None:\n parameters.clipping_limits.lower.value = self.clip_weight_min\n if self.clip_weight_max is not None:\n parameters.clipping_limits.upper.value = self.clip_weight_max\n if self.clip_gradient_min is not 
None:\n parameters.gradient_clipping_limits.lower.value = self.clip_gradient_min\n if self.clip_gradient_max is not None:\n parameters.gradient_clipping_limits.upper.value = self.clip_gradient_max\n if self.weight_decay_factor:\n parameters.weight_decay_factor = self.weight_decay_factor\n if self.multiply_weight_decay_factor_by_learning_rate:\n parameters.multiply_weight_decay_factor_by_learning_rate = True\n parameters.low_dimensional_packing_status = self.low_dimensional_packing_status", + "docstring": "Sets the optimizer fields in the OptimizationParameters.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py", + "ast_data": "FunctionDef name:_set_optimization_parameters arg:self arg:parameters arguments arg arg If Assign Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Assign If Assign Assign" + }, + { + "library": "pandas", + "name": "make_empty", + "source_code": "def make_empty(self, axes=None) -> Self:\n if axes is None:\n axes = [default_index(0)] + self.axes[1:]\n if self.ndim == 1:\n assert isinstance(self, SingleBlockManager)\n blk = self.blocks[0]\n arr = blk.values[:0]\n bp = BlockPlacement(slice(0, 0))\n nb = blk.make_block_same_class(arr, placement=bp)\n blocks = [nb]\n else:\n blocks = []\n return type(self).from_blocks(blocks, axes)", + "docstring": "return an empty BlockManager with the items axis of len 0", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:make_empty arg:self arg:axes arguments arg arg If Compare Assign Call If Compare Call Assign Assign Assign Call Call Assign Call Assign Assign Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "AdjustContrastWithMeanSubtraction", + "source_code": "class AdjustContrastWithMeanSubtraction(Module):\n\n def __init__(self, contrast_factor: Union[float, Tensor]) -> None:\n super().__init__()\n self.contrast_factor: Union[float, Tensor] = contrast_factor\n\n def forward(self, input: Tensor) -> Tensor:\n return adjust_contrast_with_mean_subtraction(input, self.contrast_factor)", + "docstring": "Adjust Contrast of an image. This implementation aligns PIL. Hence, the output is close to TorchVision. The input image is expected to be in the range of [0, 1]. Args: contrast_factor: Contrast adjust factor per element in the batch by subtracting its mean grayscaled version. 0 generates a completely black image, 1 does not modify the input image while any other non-negative number modify the brightness by this factor. Shape: - Input: Image/Input to be adjusted in the shape of :math:. - Output: Adjusted image in the shape of :math:. 
Example: >>> x = torch.ones(1, 1, 3, 3) >>> AdjustContrastWithMeanSubtraction(0.5)(x) tensor([[[[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y = torch.ones(2) >>> AdjustContrastWithMeanSubtraction(y)(x).shape torch.Size([2, 5, 3, 3])", + "type": "class", + "file_path": "kornia\\kornia\\enhance\\adjust.py", + "ast_data": "ClassDef name:AdjustContrastWithMeanSubtraction FunctionDef name:__init__ arg:self arg:contrast_factor arguments arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "polyadd", + "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef polyadd(a1, a2):\n truepoly = isinstance(a1, poly1d) or isinstance(a2, poly1d)\n a1 = atleast_1d(a1)\n a2 = atleast_1d(a2)\n diff = len(a2) - len(a1)\n if diff == 0:\n val = a1 + a2\n elif diff > 0:\n zr = NX.zeros(diff, a1.dtype)\n val = NX.concatenate((zr, a1)) + a2\n else:\n zr = NX.zeros(abs(diff), a2.dtype)\n val = a1 + NX.concatenate((zr, a2))\n if truepoly:\n val = poly1d(val)\n return val", + "docstring": "Find the sum of two polynomials. .. note:: This forms part of the old polynomial API. Since version 1.4, the new polynomial API defined in is preferred. A summary of the differences can be found in the :doc:. Returns the polynomial resulting from the sum of two input polynomials. Each input must be either a poly1d object or a 1D sequence of polynomial coefficients, from highest to lowest degree. Parameters ---------- a1, a2 : array_like or poly1d object Input polynomials. Returns ------- out : ndarray or poly1d object The sum of the inputs. If either input is a poly1d object, then the output is also a poly1d object. Otherwise, it is a 1D array of polynomial coefficients from highest to lowest degree. See Also -------- poly1d : A one-dimensional polynomial class. poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval Examples -------- >>> import numpy as np >>> np.polyadd([1, 2], [9, 5, 4]) array([9, 6, 6]) Using poly1d objects: >>> p1 = np.poly1d([1, 2]) >>> p2 = np.poly1d([9, 5, 4]) >>> print(p1) 1 x + 2 >>> print(p2) 2 9 x + 5 x + 4 >>> print(np.polyadd(p1, p2)) 2 9 x + 6 x + 6", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py", + "ast_data": "FunctionDef name:polyadd arg:a1 arg:a2 arguments arg arg Assign BoolOp Call Call Assign Call Assign Call Assign Call Call If Compare Assign If Compare Assign Call Assign Call Assign Call Call Assign Call If Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, *args, zs=0, zdir='z', depthshade=None, depthshade_minalpha=None, axlim_clip=False, **kwargs):\n if depthshade is None:\n depthshade = rcParams['axes3d.depthshade']\n if depthshade_minalpha is None:\n depthshade_minalpha = rcParams['axes3d.depthshade_minalpha']\n self._depthshade = depthshade\n self._depthshade_minalpha = depthshade_minalpha\n self._in_draw = False\n super().__init__(*args, **kwargs)\n self.set_3d_properties(zs, zdir, axlim_clip)\n self._offset_zordered = None", + "docstring": "Create a collection of flat 3D paths with its normal vector pointed in *zdir* direction, and located at *zs* on the *zdir* axis. 'zs' can be a scalar or an array-like of the same length as the number of paths in the collection. Constructor arguments are the same as for :class:. In addition, keywords *zs=0* and *zdir='z'* are available. 
Also, the keyword argument *depthshade* is available to indicate whether or not to shade the patches in order to give the appearance of depth (default is *True*). This is typically desired in scatter plots. *depthshade_minalpha* sets the minimum alpha value applied by depth-shading.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg If Compare Assign If Compare Assign Assign Assign Assign Call Call Call Assign" + }, + { + "library": "pytorch", + "name": "convert_to_pool_lines", + "source_code": "def convert_to_pool_lines(self, lines):\n name_to_group = self.compute_buffer_groups(lines)\n for i, line in enumerate(lines):\n if isinstance(line, AllocateLine):\n if line.node.get_name() in name_to_group:\n lines[i] = AllocFromPoolLine(self.wrapper, name_to_group[line.node.get_name()])\n elif isinstance(line, FreeIfNotReusedLine):\n assert not line.is_reused\n if line.node.get_name() in name_to_group:\n lines[i] = DeallocFromPoolLine(self.wrapper, name_to_group[line.node.get_name()])\n elif isinstance(line, ReuseLine):\n if line.node.get_name() in name_to_group:\n line.delete_old = False", + "docstring": "Convert AllocateLine/FreeIfNotReusedLine/ReuseLine into their pool-based counterparts.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py", + "ast_data": "FunctionDef name:convert_to_pool_lines arg:self arg:lines arguments arg arg Assign Call For Call If Call If Compare Call Assign Call Call If Call If Compare Call Assign Call Call If Call If Compare Call Assign" + }, + { + "library": "matplotlib", + "name": "get_index_label_pos", + "source_code": "def get_index_label_pos(index, extent, origin, inverted_xindex):\n if extent is None:\n extent = lookup_extent(origin)\n left, right, bottom, top = extent\n x, y = index_to_coordinate(index, extent, origin)\n is_x0 = index[-2:] == '0]'\n halign = 'left' if is_x0 ^ inverted_xindex else 'right'\n hshift = 0.5 * np.sign(left - right)\n x += hshift * (1 if is_x0 else -1)\n return (x, y, halign)", + "docstring": "Return the desired position and horizontal alignment of an index label.", + "type": "function", + "file_path": "matplotlib\\galleries\\users_explain\\artists\\imshow_extent.py", + "ast_data": "FunctionDef name:get_index_label_pos arg:index arg:extent arg:origin arg:inverted_xindex arguments arg arg arg arg If Compare Assign Call Assign Assign Call Assign Compare Assign Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "dst", + "source_code": "def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):\n return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)", + "docstring": "Return the Discrete Sine Transform of arbitrary type sequence x. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. n : int, optional Length of the transform. If `xxn=-1n=Nn=-1/2n=N-1/2k=-1k=N-1n=-1n=N-1n=-0.5n=N-0.5`. The orthonormalized DST-IV is exactly its own inverse. .. versionadded:: 1.2.0 Support for DST-IV. References ---------- .. 
[1] Wikipedia, \"Discrete sine transform\",", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_realtransforms.py", + "ast_data": "FunctionDef name:dst arg:x arg:type arg:n arg:axis arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_AddShardedSaveOps", + "source_code": "def _AddShardedSaveOps(self, filename_tensor, per_device):\n if self._write_version == saver_pb2.SaverDef.V2:\n return self._AddShardedSaveOpsForV2(filename_tensor, per_device)\n num_shards = len(per_device)\n sharded_saves = []\n num_shards_tensor = constant_op.constant(num_shards, name='num_shards')\n for shard, (device, saveables) in enumerate(per_device):\n with ops.device(device):\n sharded_filename = self.sharded_filename(filename_tensor, shard, num_shards_tensor)\n sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))\n with ops.control_dependencies([x.op for x in sharded_saves]):\n return gen_io_ops.sharded_filespec(filename_tensor, num_shards_tensor)", + "docstring": "Add ops to save the params per shard. Args: filename_tensor: a scalar String Tensor. per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as returned by _GroupByDevices(). Returns: An op to save the variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:_AddShardedSaveOps arg:self arg:filename_tensor arg:per_device arguments arg arg arg If Compare Return return:yes Call Assign Call Assign Assign Call For Call With Call Assign Call Call Call With Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X, y)", + "docstring": "Learn and apply the dimensionality reduction. Parameters ---------- X : array-like of shape (n_samples, n_features) Training samples. y : array-like of shape (n_samples,) or (n_samples, n_targets), default=None Targets. Returns ------- out : array-like or tuple of array-like The transformed data if , otherwise.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "HBoxDivider", + "source_code": "class HBoxDivider(SubplotDivider):\n\n def new_locator(self, nx, nx1=None):\n return super().new_locator(nx, 0, nx1, 0)\n\n def _locate(self, nx, ny, nx1, ny1, axes, renderer):\n nx += self._xrefindex\n nx1 += self._xrefindex\n fig_w, fig_h = self._fig.bbox.size / self._fig.dpi\n x, y, w, h = self.get_position_runtime(axes, renderer)\n summed_ws = self.get_horizontal_sizes(renderer)\n equal_hs = self.get_vertical_sizes(renderer)\n x0, y0, ox, hh = _locate(x, y, w, h, summed_ws, equal_hs, fig_w, fig_h, self.get_anchor())\n if nx1 is None:\n nx1 = -1\n x1, w1 = (x0 + ox[nx] / fig_w, (ox[nx1] - ox[nx]) / fig_w)\n y1, h1 = (y0, hh)\n return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)", + "docstring": "A for laying out axes horizontally, while ensuring that they have equal heights. Examples -------- .. 
plot:: gallery/axes_grid1/demo_axes_hbox_divider.py", + "type": "class", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py", + "ast_data": "ClassDef name:HBoxDivider FunctionDef name:new_locator arg:self arg:nx arg:nx1 arguments arg arg arg Return return:yes Call Call FunctionDef name:_locate arg:self arg:nx arg:ny arg:nx1 arg:ny1 arg:axes arg:renderer arguments arg arg arg arg arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Call If Compare Assign Assign Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "maxdists", + "source_code": "@lazy_cython\ndef maxdists(Z):\n xp = array_namespace(Z)\n Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)\n _is_valid_linkage(Z, throw=True, name='Z', xp=xp)\n\n def cy_maxdists(Z, validate):\n if validate:\n _is_valid_linkage(Z, throw=True, name='Z', xp=np)\n MD = np.zeros((Z.shape[0],))\n _hierarchy.get_max_dist_for_each_cluster(Z, MD, Z.shape[0] + 1)\n return MD\n return xpx.lazy_apply(cy_maxdists, Z, validate=is_lazy_array(Z), shape=(Z.shape[0],), dtype=xp.float64, as_numpy=True, xp=xp)", + "docstring": "Return the maximum distance between any non-singleton cluster. Parameters ---------- Z : ndarray The hierarchical clustering encoded as a matrix. See `scipy.cluster.hierarchy.maxdistsscipy.cluster.hierarchy.medianscipy.cluster.hierarchy.maxdists` returns 3.5 in this case.", + "type": "function", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:maxdists arg:Z arguments arg Assign Call Assign Call Call FunctionDef name:cy_maxdists arg:Z arg:validate arguments arg arg If Call Assign Call Call Return return:yes Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_get_module_state", + "source_code": "def _get_module_state(module: nn.Module) -> Optional[_State]:\n global _module_state_mapping\n if isinstance(module, _State):\n return cast(_State, module)\n elif module in _module_state_mapping:\n state_ref = _module_state_mapping[module]\n state = state_ref()\n if state is None:\n raise AssertionError('State has already been garbage collected')\n return state\n else:\n return None", + "docstring": "Return the ` and returned. If it is managed by a composable API, the corresponding `` will be returned.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_composable_state.py", + "ast_data": "FunctionDef name:_get_module_state arg:module arguments arg If Call Return return:yes Call If Compare Assign Assign Call If Compare Raise Call Return return:yes Return return:no" + }, + { + "library": "kornia", + "name": "equalize", + "source_code": "@perform_keep_shape_image\ndef equalize(input: Tensor) -> Tensor:\n res = []\n for image in input:\n scaled_image = torch.stack([_scale_channel(image[i, :, :]) for i in range(len(image))])\n res.append(scaled_image)\n return torch.stack(res)", + "docstring": "Apply equalize on the input tensor. .. image:: _static/img/equalize.png Implements Equalize function from PIL using PyTorch ops based on uint8 format: Args: input: image tensor to equalize with shape :math:. Returns: Equalized image tensor with shape :math:. 
Example: >>> x = torch.rand(1, 2, 3, 3) >>> equalize(x).shape torch.Size([1, 2, 3, 3])", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\adjust.py", + "ast_data": "FunctionDef name:equalize arg:input arguments arg Assign For Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_justify", + "source_code": "def _justify(head: list[Sequence[str]], tail: list[Sequence[str]]) -> tuple[list[tuple[str, ...]], list[tuple[str, ...]]]:\n combined = head + tail\n max_length = [0] * len(combined[0])\n for inner_seq in combined:\n length = [len(item) for item in inner_seq]\n max_length = [max(x, y) for x, y in zip(max_length, length)]\n head_tuples = [tuple((x.rjust(max_len) for x, max_len in zip(seq, max_length))) for seq in head]\n tail_tuples = [tuple((x.rjust(max_len) for x, max_len in zip(seq, max_length))) for seq in tail]\n return (head_tuples, tail_tuples)", + "docstring": "Justify items in head and tail, so they are right-aligned when stacked. Parameters ---------- head : list-like of list-likes of strings tail : list-like of list-likes of strings Returns ------- tuple of list of tuples of strings Same as head and tail, but items are right aligned when stacked vertically. Examples -------- >>> _justify([[\"a\", \"b\"]], [[\"abc\", \"abcd\"]]) ([(' a', ' b')], [('abc', 'abcd')])", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\printing.py", + "ast_data": "FunctionDef name:_justify arg:head arg:tail arguments arg arg Assign Assign Call For Assign Call Assign Call Call Assign Call Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_assert_same_graph", + "source_code": "def _assert_same_graph(original_item, item) -> None:\n original_graph = getattr(original_item, 'graph', None)\n graph = getattr(item, 'graph', None)\n if original_graph and graph and (original_graph is not graph):\n raise ValueError('%s must be from the same graph as %s (graphs are %s and %s).' % (item, original_item, graph, original_graph))", + "docstring": "Fail if the 2 items are from different graphs. Args: original_item: Original item to check against. item: Item to check. Raises: ValueError: if graphs do not match.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_assert_same_graph arg:original_item arg:item arguments arg arg Assign Call Assign Call If BoolOp Compare Raise Call" + }, + { + "library": "scikit-learn", + "name": "__len__", + "source_code": "def __len__(self):\n return len(self.steps)", + "docstring": "Returns the length of the Pipeline", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_accumulate", + "source_code": "def _accumulate(self, name: str, *, skipna: bool=True, **kwargs) -> ExtensionArray:\n raise NotImplementedError(f'cannot perform {name} with type {self.dtype}')", + "docstring": "Return an ExtensionArray performing an accumulation operation. The underlying data type might change. Parameters ---------- name : str Name of the function, supported values are: - cummin - cummax - cumsum - cumprod skipna : bool, default True If True, skip NA values. **kwargs Additional keyword arguments passed to the accumulation function. Currently, there is no supported kwarg. Returns ------- array An array performing the accumulation operation. 
Raises ------ NotImplementedError : subclass does not define accumulations See Also -------- api.extensions.ExtensionArray._concat_same_type : Concatenate multiple array of this dtype. api.extensions.ExtensionArray.view : Return a view on the array. api.extensions.ExtensionArray._explode : Transform each element of list-like to a row. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr._accumulate(name=\"cumsum\") [1, 3, 6] Length: 3, dtype: Int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:_accumulate arg:self arg:name arguments arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_filter_returned_ops", + "source_code": "def _filter_returned_ops(fn):\n returned_ops = {}\n\n def wrap_and_filter_returned_ops(*args, **kwargs):\n outputs = fn(*args, **kwargs)\n flat_outputs = nest.flatten(outputs)\n for n in range(len(flat_outputs)):\n output = flat_outputs[n]\n if isinstance(output, ops.Operation):\n returned_ops[n] = output\n flat_outputs[n] = None\n return nest.pack_sequence_as(outputs, flat_outputs)\n return (wrap_and_filter_returned_ops, returned_ops)", + "docstring": "Filtering out any ops returned by function. Args: fn: a function Returns: A tuple of ( Wrapped function that returns in place of any ops, dict that maps the index in the flat output structure to the returned op )", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py", + "ast_data": "FunctionDef name:_filter_returned_ops arg:fn arguments arg Assign FunctionDef name:wrap_and_filter_returned_ops arguments arg arg Assign Call Assign Call For Call Call Assign If Call Assign Assign Return return:yes Call Return return:yes" + }, + { + "library": "scipy", + "name": "_read_string", + "source_code": "def _read_string(f):\n length = _read_long(f)\n if length > 0:\n chars = _read_bytes(f, length).decode('latin1')\n _align_32(f)\n else:\n chars = ''\n return chars", + "docstring": "Read a string", + "type": "function", + "file_path": "scipy\\scipy\\io\\_idl.py", + "ast_data": "FunctionDef name:_read_string arg:f arguments arg Assign Call If Compare Assign Call Call Call Assign Return return:yes" + }, + { + "library": "numpy", + "name": "hermesub", + "source_code": "def hermesub(c1, c2):\n return pu._sub(c1, c2)", + "docstring": "Subtract one Hermite series from another. Returns the difference of two Hermite series - . The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their difference. 
See Also -------- hermeadd, hermemulx, hermemul, hermediv, hermepow Notes ----- Unlike multiplication, division, etc., the difference of two Hermite series is a Hermite series (without having to \"reproject\" the result onto the basis set) so subtraction, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial.hermite_e import hermesub >>> hermesub([1, 2, 3, 4], [1, 2, 3]) array([0., 0., 0., 4.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", + "ast_data": "FunctionDef name:hermesub arg:c1 arg:c2 arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "np_asarray", + "source_code": "def np_asarray(values, dtype=None, order=None, copy=None):\n if np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0':\n if dtype is not None and np.issubdtype(dtype, np.number):\n return np.asarray(values, order=order, copy=copy).astype(dtype, copy=copy)\n else:\n return np.asarray(values, dtype=dtype, order=order, copy=copy)\n else:\n return np.asarray(values, dtype=dtype, order=order)", + "docstring": "Converts input values to a NumPy array. It will not make a copy. In NumPy 2.x and later, strict type casting can lead to errors when values overflow the specified dtype. This function addresses this by replacing direct np.array(..., dtype=...) calls with np.array(...).astype(...). This allows for intended overflows, aligning with the behavior of older NumPy versions. Args: values: Array_like objects. E.g., a python list, tuple, or an object whose __array__ method returns an array. dtype: The desired numpy data type for the array. order: {‘C’, ‘F’, ‘A’, ‘K’}. copy: bool. If True, then the object is copied. If None then the object is copied only if needed, i.e. if __array__ returns a copy, if obj is a nested sequence, or if a copy is needed to satisfy any of the other requirements (dtype, order, etc.). For False it raises a ValueError if a copy cannot be avoided. Returns: A NumPy array with the specified data type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\numpy_compat.py", + "ast_data": "FunctionDef name:np_asarray arg:values arg:dtype arg:order arg:copy arguments arg arg arg arg If Compare Call If BoolOp Compare Call Return return:yes Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "super_in_original_context", + "source_code": "def super_in_original_context(f, args, caller_fn_scope):\n if args:\n return f(*args)\n ctx_frame = _find_originating_frame(caller_fn_scope, innermost=False)\n type_arg = ctx_frame.f_locals['__class__']\n self_arg_name = ctx_frame.f_code.co_varnames[0]\n self_arg = ctx_frame.f_locals[self_arg_name]\n return f(type_arg, self_arg)", + "docstring": "Executes the super function in the context of a specified function. 
See for the exact details Args: f: Callable, typically the super builtin args: List[Any], the original call arguments caller_fn_scope: Optional[function_wrappers.FunctionScope], the function scope of the converted function in which this call was originally made Returns: The result of calling as if it was called in the frame indicated by .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py", + "ast_data": "FunctionDef name:super_in_original_context arg:f arg:args arg:caller_fn_scope arguments arg arg arg If Return return:yes Call Assign Call Assign Assign Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "extern", + "source_code": "def extern(self, include: 'GlobPattern', *, exclude: 'GlobPattern'=(), allow_empty: bool=True):\n self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(_ModuleProviderAction.EXTERN, allow_empty)", + "docstring": "Include `mockclose`, no such exception is thrown.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_exporter.py", + "ast_data": "FunctionDef name:extern arg:self arg:include arguments arg arg arg arg Assign Call Call" + }, + { + "library": "scipy", + "name": "standard_deviation", + "source_code": "def standard_deviation(input, labels=None, index=None):\n return np.sqrt(variance(input, labels, index))", + "docstring": "Calculate the standard deviation of the values of an N-D image array, optionally at specified sub-regions. Parameters ---------- input : array_like N-D image data to process. labels : array_like, optional Labels to identify sub-regions in . If not None, must be same shape as . index : int or sequence of ints, optional to include in output. If None (default), all values where is non-zero are used. Returns ------- standard_deviation : float or ndarray Values of standard deviation, for each sub-region if and are specified. See Also -------- label, variance, maximum, minimum, extrema Examples -------- >>> import numpy as np >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> from scipy import ndimage >>> ndimage.standard_deviation(a) 2.7585095613392387 Features to process can be specified using and : >>> lbl, nlbl = ndimage.label(a) >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1)) array([ 1.479, 1.5 , 3. ]) If no index is given, non-zero are processed: >>> ndimage.standard_deviation(a, lbl) 2.4874685927665499", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_measurements.py", + "ast_data": "FunctionDef name:standard_deviation arg:input arg:labels arg:index arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "nested_row_lengths", + "source_code": "def nested_row_lengths(self, name=None):\n with ops.name_scope(name, 'RaggedNestedRowLengths', [self]):\n rt_nested_row_lengths = []\n rt = self\n while isinstance(rt, RaggedTensor):\n rt_nested_row_lengths.append(rt.row_lengths())\n rt = rt.values\n return tuple(rt_nested_row_lengths)", + "docstring": "Returns a tuple containing the row_lengths for all ragged dimensions. is a tuple containing the tensors for all ragged dimensions in , ordered from outermost to innermost. Args: name: A name prefix for the returned tensors (optional). Returns: A of 1-D integer . 
The length of the tuple is equal to .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:nested_row_lengths arg:self arg:name arguments arg arg With Call Assign Assign While Call Call Call Assign Return return:yes Call" + }, + { + "library": "pandas", + "name": "size", + "source_code": "@final\n@property\ndef size(self) -> int:\n return int(np.prod(self.shape))", + "docstring": "Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- numpy.ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({\"a\": 1, \"b\": 2, \"c\": 3}) >>> s.size 3 >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df.size 4", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_get_handle_mover", + "source_code": "def _get_handle_mover(graph, feeder, handle):\n dtype = _get_handle_feeder(graph, feeder)\n if dtype is None:\n return None\n handle_device = TensorHandle._get_device_name(handle)\n if feeder.op.device == handle_device:\n return None\n graph_key = TensorHandle._get_mover_key(feeder, handle)\n result = graph._handle_movers.get(graph_key)\n if result is None:\n holder, reader = _get_handle_reader(graph, handle, dtype)\n with graph.as_default(), graph.device(feeder.op.device):\n mover = gen_data_flow_ops.get_session_handle(reader)\n result = (holder, mover)\n graph._handle_movers[graph_key] = result\n return result", + "docstring": "Return a move subgraph for this pair of feeder and handle.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py", + "ast_data": "FunctionDef name:_get_handle_mover arg:graph arg:feeder arg:handle arguments arg arg arg Assign Call If Compare Return return:no Assign Call If Compare Return return:no Assign Call Assign Call If Compare Assign Call With Call Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_check_y", + "source_code": "def _check_y(y, multi_output=False, y_numeric=False, estimator=None):\n if multi_output:\n y = check_array(y, accept_sparse='csr', ensure_all_finite=True, ensure_2d=False, dtype=None, input_name='y', estimator=estimator)\n else:\n estimator_name = _check_estimator_name(estimator)\n y = column_or_1d(y, warn=True)\n _assert_all_finite(y, input_name='y', estimator_name=estimator_name)\n _ensure_no_complex_data(y)\n if y_numeric and hasattr(y.dtype, 'kind') and (y.dtype.kind == 'O'):\n y = y.astype(np.float64)\n return y", + "docstring": "Isolated part of check_X_y dedicated to y validation", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\validation.py", + "ast_data": "FunctionDef name:_check_y arg:y arg:multi_output arg:y_numeric arg:estimator arguments arg arg arg arg If Assign Call Assign Call Assign Call Call Call If BoolOp Call Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "record_summaries_every_n_global_steps", + "source_code": "def record_summaries_every_n_global_steps(n, global_step=None):\n if global_step is None:\n global_step = training_util.get_or_create_global_step()\n with ops.device('cpu:0'):\n should = lambda: math_ops.equal(global_step % n, 0)\n if not context.executing_eagerly():\n 
should = should()\n return record_if(should)", + "docstring": "Sets the should_record_summaries Tensor to true if global_step % n == 0.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:record_summaries_every_n_global_steps arg:n arg:global_step arguments arg arg If Compare Assign Call With Call Assign arguments Call If Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_copy_trackable_to_cpu", + "source_code": "def _copy_trackable_to_cpu(self, object_map):\n if self in object_map:\n for v in self._variables:\n v._copy_trackable_to_cpu(object_map)\n else:\n copied_vars = []\n for v in self._variables:\n v._copy_trackable_to_cpu(object_map)\n copied_vars.append(object_map[v])\n new_var = ShardedVariable(copied_vars, name=self.name)\n object_map[self] = new_var", + "docstring": "For implementing async checkpointing.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py", + "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare For Call Assign For Call Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "get_workers_list", + "source_code": "def get_workers_list(cluster_resolver):\n worker_job_name = 'worker'\n cluster_spec = cluster_resolver.cluster_spec()\n if not cluster_spec:\n raise errors.UnavailableError('None', 'None', 'Cluster spec not found, your client must run in GCE environment.')\n task_indices = cluster_spec.task_indices(worker_job_name)\n workers_list = [cluster_spec.task_address(worker_job_name, i).replace(':8470', ':8466') for i in task_indices]\n return ','.join(workers_list)", + "docstring": "Returns a comma separated list of TPU worker host:port pairs. Gets cluster_spec from cluster_resolver. Use the worker's task indices to obtain and return a list of host:port pairs. Args: cluster_resolver: TensorFlow TPUClusterResolver instance. Returns: A string of comma separated list of host:port pairs. For example: '10.2.0.1:8466,10.2.0.2:8466,10.2.0.3:8466,10.2.0.4:8466' Raises: UnavailableError: cluster_resolver doesn't contain a valid cluster_spec.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\capture_tpu_profile.py", + "ast_data": "FunctionDef name:get_workers_list arg:cluster_resolver arguments arg Assign Assign Call If Raise Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "dedup_names", + "source_code": "def dedup_names(names: Sequence[Hashable], is_potential_multiindex: bool) -> Sequence[Hashable]:\n names = list(names)\n counts: DefaultDict[Hashable, int] = defaultdict(int)\n for i, col in enumerate(names):\n cur_count = counts[col]\n while cur_count > 0:\n counts[col] = cur_count + 1\n if is_potential_multiindex:\n assert isinstance(col, tuple)\n col = col[:-1] + (f'{col[-1]}.{cur_count}',)\n else:\n col = f'{col}.{cur_count}'\n cur_count = counts[col]\n names[i] = col\n counts[col] = cur_count + 1\n return names", + "docstring": "Rename column names if duplicates exist. Currently the renaming is done by appending a period and an autonumeric, but a custom pattern may be supported in the future. 
Examples -------- >>> dedup_names([\"x\", \"y\", \"x\", \"x\"], is_potential_multiindex=False) ['x', 'y', 'x.1', 'x.2']", + "type": "function", + "file_path": "pandas\\pandas\\io\\common.py", + "ast_data": "FunctionDef name:dedup_names arg:names arg:is_potential_multiindex arguments arg arg Assign Call Call For Call Assign While Compare Assign If Call Assign Assign Assign Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "imshow", + "source_code": "@_preprocess_data()\n@_docstring.interpd\ndef imshow(self, X, cmap=None, norm=None, *, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, colorizer=None, origin=None, extent=None, interpolation_stage=None, filternorm=True, filterrad=4.0, resample=None, url=None, **kwargs):\n im = mimage.AxesImage(self, cmap=cmap, norm=norm, colorizer=colorizer, interpolation=interpolation, origin=origin, extent=extent, filternorm=filternorm, filterrad=filterrad, resample=resample, interpolation_stage=interpolation_stage, **kwargs)\n if aspect is None and (not (im.is_transform_set() and (not im.get_transform().contains_branch(self.transData)))):\n aspect = mpl.rcParams['image.aspect']\n if aspect is not None:\n self.set_aspect(aspect)\n im.set_data(X)\n im.set_alpha(alpha)\n if im.get_clip_path() is None:\n im.set_clip_path(self.patch)\n im._check_exclusionary_keywords(colorizer, vmin=vmin, vmax=vmax)\n im._scale_norm(norm, vmin, vmax)\n im.set_url(url)\n im.set_extent(im.get_extent())\n self.add_image(im)\n return im", + "docstring": "Display data as an image, i.e., on a 2D regular raster. The input may either be actual RGB(A) data, or 2D scalar data, which will be rendered as a pseudocolor image. For displaying a grayscale image, set up the colormapping using the parameters `/gallery/images_contours_and_fields/image_antialiasingimage.interpolation.Axes.set_aspectimage.aspect.Axes.set_aspectimage.interpolation/gallery/images_contours_and_fields/interpolation_methods/gallery/images_contours_and_fields/image_antialiasing/gallery/images_contours_and_fields/image_antialiasingimage.originimshow_extentimshow_extentimage.resample.AxesImage.Artist.set_url~matplotlib.image.AxesImage~matplotlib.artist.Artist.AxesImage~matplotlib.pyplot.imshow` expects RGB images adopting the straight (unassociated) alpha representation.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:imshow arg:self arg:X arg:cmap arg:norm arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call If BoolOp Compare BoolOp Call Call Call Assign If Compare Call Call Call If Compare Call Call Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_Conv2DGrad", + "source_code": "@ops.RegisterGradient('Conv2D')\ndef _Conv2DGrad(op: ops.Operation, grad):\n dilations = op.get_attr('dilations')\n strides = op.get_attr('strides')\n padding = op.get_attr('padding')\n explicit_paddings = op.get_attr('explicit_paddings')\n use_cudnn_on_gpu = op.get_attr('use_cudnn_on_gpu')\n data_format = op.get_attr('data_format')\n shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])\n return [gen_nn_ops.conv2d_backprop_input(shape_0, op.inputs[1], grad, dilations=dilations, strides=strides, padding=padding, explicit_paddings=explicit_paddings, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format), gen_nn_ops.conv2d_backprop_filter(op.inputs[0], shape_1, grad, dilations=dilations, strides=strides, padding=padding, 
explicit_paddings=explicit_paddings, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format)]", + "docstring": "Gradient function for Conv2D.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py", + "ast_data": "FunctionDef name:_Conv2DGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "_GreaterThan", + "source_code": "class _GreaterThan(Constraint):\n\n def __init__(self, lower_bound):\n self.lower_bound = lower_bound\n super().__init__()\n\n def check(self, value):\n return self.lower_bound < value\n\n def __repr__(self):\n fmt_string = self.__class__.__name__[1:]\n fmt_string += f'(lower_bound={self.lower_bound})'\n return fmt_string", + "docstring": "Constrain to a real half line .", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:_GreaterThan FunctionDef name:__init__ arg:self arg:lower_bound arguments arg arg Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ShuffleDataset", + "source_code": "class _ShuffleDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n def __init__(self, input_dataset, buffer_size, seed=None, reshuffle_each_iteration=True, name=None):\n self._input_dataset = input_dataset\n self._buffer_size = ops.convert_to_tensor(buffer_size, dtype=dtypes.int64, name='buffer_size')\n self._seed, self._seed2 = random_seed.get_seed(seed)\n self._reshuffle_each_iteration = reshuffle_each_iteration\n self._name = name\n if tf2.enabled() and (context.executing_eagerly() or ops.inside_function()):\n variant_tensor = gen_dataset_ops.shuffle_dataset_v3(input_dataset._variant_tensor, buffer_size=self._buffer_size, seed=self._seed, seed2=self._seed2, seed_generator=gen_dataset_ops.dummy_seed_generator(), reshuffle_each_iteration=self._reshuffle_each_iteration, **self._common_args)\n else:\n variant_tensor = gen_dataset_ops.shuffle_dataset(input_dataset._variant_tensor, buffer_size=self._buffer_size, seed=self._seed, seed2=self._seed2, reshuffle_each_iteration=self._reshuffle_each_iteration, **self._common_args)\n super().__init__(input_dataset, variant_tensor)", + "docstring": "A that randomly shuffles the elements of its input.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\shuffle_op.py", + "ast_data": "ClassDef name:_ShuffleDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:buffer_size arg:seed arg:reshuffle_each_iteration arg:name arguments arg arg arg arg arg arg Assign Assign Call Assign Call Assign Assign If BoolOp Call BoolOp Call Call Assign Call Call Assign Call Call Call" + }, + { + "library": "scipy", + "name": "to_zpk", + "source_code": "def to_zpk(self):\n return copy.deepcopy(self)", + "docstring": "Return a copy of the current 'ZerosPolesGain' system. 
Returns ------- sys : instance of `ZerosPolesGain` The current system (copy)", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:to_zpk arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_set_converter_options_for_calibration", + "source_code": "def _set_converter_options_for_calibration(self, converter: TFLiteConverter) -> TFLiteConverter:\n if not converter.optimizations:\n raise ValueError('converter object must set optimizations to lite.Optimize.DEFAULT')\n if not converter.representative_dataset:\n raise ValueError('converter object must set representative_dataset')\n converter.experimental_mlir_quantizer = True\n converter._experimental_calibrate_only = True\n return converter", + "docstring": "Verify converter options and set required experimental options.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py", + "ast_data": "FunctionDef name:_set_converter_options_for_calibration arg:self arg:converter arguments arg arg If Raise Call If Raise Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "num_cores_per_replica", + "source_code": "@property\ndef num_cores_per_replica(self) -> int:\n return self._num_cores_per_replica", + "docstring": "The number of cores per replica.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py", + "ast_data": "FunctionDef name:num_cores_per_replica arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_common_shape", + "source_code": "def get_common_shape(x, y):\n if x is None != y is None:\n raise RuntimeError('Cannot find a common shape when LHS shape is None but RHS shape is not (or vice versa): %s vs. %s' % (x, y))\n if x is None:\n return None\n if not isinstance(x, tensor_shape.TensorShape):\n raise TypeError('Expected x to be a TensorShape but saw %s' % (x,))\n if not isinstance(y, tensor_shape.TensorShape):\n raise TypeError('Expected y to be a TensorShape but saw %s' % (y,))\n if x.rank != y.rank or x.rank is None:\n return tensor_shape.TensorShape(None)\n dims = []\n for dim_x, dim_y in zip(x.dims, y.dims):\n if dim_x != dim_y or tensor_shape.dimension_value(dim_x) is None or tensor_shape.dimension_value(dim_y) is None:\n dims.append(None)\n else:\n dims.append(tensor_shape.dimension_value(dim_x))\n return tensor_shape.TensorShape(dims)", + "docstring": "Find a `TensorShape` that is compatible with both `x` and `y`.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", + "ast_data": "FunctionDef name:get_common_shape arg:x arg:y arguments arg arg If Compare Raise Call If Compare Return return:no If Call Raise Call If Call Raise Call If BoolOp Compare Compare Return return:yes Call Assign For Call If BoolOp Compare Compare Call Compare Call Call Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "smallest_normal", + "source_code": "@property\ndef smallest_normal(self):\n if isnan(self._machar.smallest_normal.flat[0]):\n warnings.warn('The value of smallest normal is undefined for double double', UserWarning, stacklevel=2)\n return self._machar.smallest_normal.flat[0]", + "docstring": "Return the value for the smallest normal. Returns ------- smallest_normal : float Value for the smallest normal. 
Warns ----- UserWarning If the calculated value for the smallest normal is requested for double-double.", + "type": "method", + "file_path": "numpy\\numpy\\_core\\getlimits.py", + "ast_data": "FunctionDef name:smallest_normal arg:self arguments arg If Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_deterministic_vector_sign_flip", + "source_code": "def _deterministic_vector_sign_flip(u):\n max_abs_rows = np.argmax(np.abs(u), axis=1)\n signs = np.sign(u[range(u.shape[0]), max_abs_rows])\n u *= signs[:, np.newaxis]\n return u", + "docstring": "Modify the sign of vectors for reproducibility. Flips the sign of elements of all the vectors (rows of u) such that the absolute maximum element of each vector is positive. Parameters ---------- u : ndarray Array with vectors as its rows. Returns ------- u_flipped : ndarray with same shape as u Array with the sign flipped vectors as its rows.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\extmath.py", + "ast_data": "FunctionDef name:_deterministic_vector_sign_flip arg:u arguments arg Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "generate_file_c", + "source_code": "def generate_file_c(sigs, lib_name, accelerate):\n if lib_name == 'BLAS':\n preamble = [C_PREAMBLE]\n elif lib_name == 'LAPACK':\n preamble = [C_PREAMBLE, LAPACK_DECLS]\n else:\n raise RuntimeError(f'Unrecognized lib_name: {lib_name}.')\n preamble = ['/*\\n', *COMMENT_TEXT, '*/\\n'] + preamble + [CPP_GUARD_BEGIN]\n decls = [generate_decl_c(**sig, accelerate=accelerate) for sig in sigs]\n content = preamble + decls + [CPP_GUARD_END]\n return ''.join(content)", + "docstring": "Generate content for C header file for Cython to import.", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_generate_pyx.py", + "ast_data": "FunctionDef name:generate_file_c arg:sigs arg:lib_name arg:accelerate arguments arg arg arg If Compare Assign If Compare Assign Raise Call Assign Assign Call Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "_update_hessian", + "source_code": "def _update_hessian(self, ys, Bs, sBs, y):\n self.B = self._syr(1.0 / ys, y, a=self.B)\n self.B = self._syr(-1.0 / sBs, Bs, a=self.B)", + "docstring": "Update the Hessian matrix. BFGS update using the formula: ``. Formula (6.19) in [1]_. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. \"Numerical optimization\" Second Edition (2006).", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py", + "ast_data": "FunctionDef name:_update_hessian arg:self arg:ys arg:Bs arg:sBs arg:y arguments arg arg arg arg arg Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "current_stream", + "source_code": "def current_stream(device: Optional[_device_t]=None) -> Stream:\n return torch._C._mtia_getCurrentStream(_get_device_index(device, optional=True))", + "docstring": "Return the currently selected :class: for a given device. Args: device (torch.device or int, optional): selected device. 
Returns the currently selected :class: for the current device, given by :func:, if :attr: is `` (default).", + "type": "function", + "file_path": "pytorch\\torch\\mtia\\__init__.py", + "ast_data": "FunctionDef name:current_stream arg:device arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "time_pdist", + "source_code": "def time_pdist(self, num_points, metric):\n distance.pdist(self.points, self.metric, **self.kwargs)", + "docstring": "Time scipy.spatial.distance.pdist over a range of input data sizes and metrics.", + "type": "method", + "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py", + "ast_data": "FunctionDef name:time_pdist arg:self arg:num_points arg:metric arguments arg arg arg Call" + }, + { + "library": "django", + "name": "clear_cache", + "source_code": "def clear_cache(self):\n self._cache.clear()", + "docstring": "Clear out the content-type cache.", + "type": "method", + "file_path": "django\\django\\contrib\\contenttypes\\models.py", + "ast_data": "FunctionDef name:clear_cache arg:self arguments arg Call" + }, + { + "library": "scipy", + "name": "_sample_odds_ratio_ci", + "source_code": "def _sample_odds_ratio_ci(self, confidence_level=0.95, alternative='two-sided'):\n if confidence_level < 0 or confidence_level > 1:\n raise ValueError('confidence_level must be between 0 and 1')\n table = self._table\n if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):\n ci = (0, np.inf)\n else:\n ci = _sample_odds_ratio_ci(table, confidence_level=confidence_level, alternative=alternative)\n return ConfidenceInterval(low=ci[0], high=ci[1])", + "docstring": "Confidence interval for the sample odds ratio.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_odds_ratio.py", + "ast_data": "FunctionDef name:_sample_odds_ratio_ci arg:self arg:confidence_level arg:alternative arguments arg arg arg If BoolOp Compare Compare Raise Call Assign If BoolOp Compare Call Compare Call Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "partial_tile", + "source_code": "@classmethod\ndef partial_tile(cls, tile_assignment):\n if not isinstance(tile_assignment, _np.ndarray):\n raise TypeError('PartialTile assignment must be of type np.ndarray')\n dims = list(tile_assignment.shape)\n flattened_devices = tile_assignment.reshape(-1, order='C')\n return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=dims, tile_assignment_devices=list(flattened_devices), replicate_on_last_tile_dim=True))", + "docstring": "Returns a partially tiled sharding attribute. This is similar to tile(), but tile_assignment has one more dimension than the tensor, and tiles in the last dimension of tile_assignment are replicated. Args: tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. 
Raises: TypeError: tile_assignment was not of np.array type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py", + "ast_data": "FunctionDef name:partial_tile arg:cls arg:tile_assignment arguments arg arg If Call Raise Call Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "sparse_add", + "source_code": "@tf_export(v1=['sparse.add', 'sparse_add'])\n@deprecation.deprecated_endpoints('sparse_add')\n@deprecation.deprecated_args(None, 'thresh is deprecated, use threshold instead', 'thresh')\ndef sparse_add(a, b, threshold=None, thresh=None):\n threshold = deprecation.deprecated_argument_lookup('threshold', threshold, 'thresh', thresh)\n if threshold is None:\n threshold = 0\n return sparse_add_v2(a, b, threshold)", + "docstring": "Adds two tensors, at least one of each is a . If one and one are passed in, returns a . If both arguments are s, this returns a . The order of arguments does not matter. Use vanilla for adding two dense s. The shapes of the two operands must match: broadcasting is not supported. The indices of any input are assumed ordered in standard lexicographic order. If this is not the case, before this step run to restore index ordering. If both arguments are sparse, we perform \"clipping\" as follows. By default, if two values sum to zero at some index, the output would still include that particular location in its index, storing a zero in the corresponding value slot. To override this, callers can specify , indicating that if the sum has a magnitude strictly smaller than , its corresponding value and index would then not be included. In particular, (default) means everything is kept and actual thresholding happens only for a positive value. For example, suppose the logical sum of two sparse operands is (densified): [ 2] [.1 0] [ 6 -.2] Then, * (the default): all 5 index/value pairs will be returned. * : only .1 and 0 will vanish, and the remaining three index/value pairs will be returned. * : .1, 0, and -.2 will vanish. Args: a: The first operand; or . b: The second operand; or . At least one operand must be sparse. threshold: An optional 0-D (defaults to ). The magnitude threshold that determines if an output value/index pair takes space. Its dtype should match that of the values if they are real; if the latter are complex64/complex128, then the dtype should be float32/float64, correspondingly. thresh: Deprecated alias for . Returns: A or a , representing the sum. Raises: TypeError: If both and are s. Use instead.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:sparse_add arg:a arg:b arg:threshold arg:thresh arguments arg arg arg arg Assign Call If Compare Assign Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "MirroredStrategy", + "source_code": "@tf_export('distribute.MirroredStrategy', v1=[])\nclass MirroredStrategy(distribute_lib.Strategy):\n _collective_key_base = 0\n\n def __init__(self, devices=None, cross_device_ops=None):\n extended = MirroredExtended(self, devices=devices, cross_device_ops=cross_device_ops)\n super(MirroredStrategy, self).__init__(extended)\n distribute_lib.distribution_strategy_gauge.get_cell('V2').set('MirroredStrategy')", + "docstring": "Synchronous training across multiple replicas on one machine. This strategy is typically used for training on one machine with multiple GPUs. For TPUs, use . 
To use with multiple workers, please refer to . For example, a variable created under a is a . If no devices are specified in the constructor argument of the strategy then it will use all the available GPUs. If no GPUs are found, it will use the available CPUs. Note that TensorFlow treats all CPUs on a machine as a single device, and uses threads internally for parallelism. >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"]) >>> with strategy.scope(): ... x = tf.Variable(1.) >>> x MirroredVariable:{ 0: , 1: } While using distribution strategies, all the variable creation should be done within the strategy's scope. This will replicate the variables across all the replicas and keep them in sync using an all-reduce algorithm. Variables created inside a which is wrapped with a are still . >>> x = [] >>> @tf.function # Wrap the function with tf.function. ... def create_variable(): ... if not x: ... x.append(tf.Variable(1.)) ... return x[0] >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"]) >>> with strategy.scope(): ... _ = create_variable() ... print(x[0]) MirroredVariable:{ 0: , 1: } can be used to distribute the dataset across the replicas when writing your own training loop. If you are using and methods available in , then will handle the distribution for you. For example: Args: devices: a list of device strings such as . If , all available GPUs are used. If no GPUs are found, CPU is used. cross_device_ops: optional, a descendant of . If this is not set, will be used by default. One would customize this if NCCL isn't available or if a special implementation that exploits the particular hardware is available.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py", + "ast_data": "ClassDef name:MirroredStrategy Assign FunctionDef name:__init__ arg:self arg:devices arg:cross_device_ops arguments arg arg arg Assign Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "_generate_jit_forward_graph", + "source_code": "def _generate_jit_forward_graph(self):\n scripted_op_bench = torch.jit.script(self.op_bench)\n return scripted_op_bench.forward_consume", + "docstring": "generate a graph for the forward function via scripting", + "type": "method", + "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py", + "ast_data": "FunctionDef name:_generate_jit_forward_graph arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "assert_nontrivial_match", + "source_code": "def assert_nontrivial_match(self):\n for trackable_object in util.list_objects(self._object_graph_view, self._options.experimental_skip_slot_variables):\n self._checkpoint.all_python_objects.add(trackable_object)\n if len(self._checkpoint.object_by_proto_id) <= 1:\n unused_python_objects = object_identity.ObjectIdentitySet(_objects_with_attributes(self._checkpoint.all_python_objects)) - object_identity.ObjectIdentitySet(self._checkpoint.object_by_proto_id.values())\n if unused_python_objects:\n raise AssertionError(f'Nothing except the root object matched a checkpointed value. Typically this means that the checkpoint does not match the Python program. The following objects have no matching checkpointed value: {list(unused_python_objects)}')\n else:\n raise AssertionError(f'Nothing to load. 
No dependencies have been added to {self._object_graph_view.root} yet.')\n return self", + "docstring": "Raises an exception if only the root object matched.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:assert_nontrivial_match arg:self arguments arg For Call Call If Compare Call Assign Call Call Call Call If Raise Call Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_load_options", + "source_code": "def get_load_options():\n return _load_context.load_options()", + "docstring": "Returns the load options under a load context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\load_context.py", + "ast_data": "FunctionDef name:get_load_options arguments Return return:yes Call" + }, + { + "library": "numpy", + "name": "rot90", + "source_code": "@array_function_dispatch(_rot90_dispatcher)\ndef rot90(m, k=1, axes=(0, 1)):\n axes = tuple(axes)\n if len(axes) != 2:\n raise ValueError('len(axes) must be 2.')\n m = asanyarray(m)\n if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:\n raise ValueError('Axes must be different.')\n if axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or (axes[1] < -m.ndim):\n raise ValueError(f'Axes={axes} out of range for array of ndim={m.ndim}.')\n k %= 4\n if k == 0:\n return m[:]\n if k == 2:\n return flip(flip(m, axes[0]), axes[1])\n axes_list = arange(0, m.ndim)\n axes_list[axes[0]], axes_list[axes[1]] = (axes_list[axes[1]], axes_list[axes[0]])\n if k == 1:\n return transpose(flip(m, axes[1]), axes_list)\n else:\n return flip(transpose(m, axes_list), axes[1])", + "docstring": "Rotate an array by 90 degrees in the plane specified by axes. Rotation direction is from the first towards the second axis. This means for a 2D array with the default and , the rotation will be counterclockwise. Parameters ---------- m : array_like Array of two or more dimensions. k : integer Number of times the array is rotated by 90 degrees. axes : (2,) array_like The array is rotated in the plane defined by the axes. Axes must be different. Returns ------- y : ndarray A rotated view of . See Also -------- flip : Reverse the order of elements in an array along the given axis. fliplr : Flip an array horizontally. flipud : Flip an array vertically. 
Notes ----- `` Examples -------- >>> import numpy as np >>> m = np.array([[1,2],[3,4]], int) >>> m array([[1, 2], [3, 4]]) >>> np.rot90(m) array([[2, 4], [1, 3]]) >>> np.rot90(m, 2) array([[4, 3], [2, 1]]) >>> m = np.arange(8).reshape((2,2,2)) >>> np.rot90(m, 1, (1,2)) array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", + "ast_data": "FunctionDef name:rot90 arg:m arg:k arg:axes arguments arg arg arg Assign Call If Compare Call Raise Call Assign Call If BoolOp Compare Compare Call Raise Call If BoolOp Compare Compare Compare Compare Raise Call If Compare Return return:yes If Compare Return return:yes Call Call Assign Call Assign If Compare Return return:yes Call Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "SynchronizationError", + "source_code": "class SynchronizationError(Exception):\n pass", + "docstring": "Base class for errors detected by CUDA Sanitizer.", + "type": "class", + "file_path": "pytorch\\torch\\cuda\\_sanitizer.py", + "ast_data": "ClassDef name:SynchronizationError" + }, + { + "library": "tensorflow", + "name": "_assert_static", + "source_code": "def _assert_static(condition, data):\n if not condition:\n data_static = [_maybe_constant_value_string(x) for x in data]\n raise errors.InvalidArgumentError(node_def=None, op=None, message='\\n'.join(data_static))", + "docstring": "Raises a InvalidArgumentError with as much information as possible.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:_assert_static arg:condition arg:data arguments arg arg If Assign Call Raise Call Call" + }, + { + "library": "scipy", + "name": "MatrixRankWarning", + "source_code": "class MatrixRankWarning(UserWarning):\n pass", + "docstring": "Warning for exactly singular matrices.", + "type": "class", + "file_path": "scipy\\scipy\\sparse\\linalg\\_dsolve\\linsolve.py", + "ast_data": "ClassDef name:MatrixRankWarning" + }, + { + "library": "pandas", + "name": "sample", + "source_code": "@final\ndef sample(self, n: int | None=None, frac: float | None=None, replace: bool=False, weights=None, random_state: RandomState | None=None, axis: Axis | None=None, ignore_index: bool=False) -> Self:\n if axis is None:\n axis = 0\n axis = self._get_axis_number(axis)\n obj_len = self.shape[axis]\n rs = common.random_state(random_state)\n size = sample.process_sampling_size(n, frac, replace)\n if size is None:\n assert frac is not None\n size = round(frac * obj_len)\n if weights is not None:\n weights = sample.preprocess_weights(self, weights, axis)\n sampled_indices = sample.sample(obj_len, size, replace, weights, rs)\n result = self.take(sampled_indices, axis=axis)\n if ignore_index:\n result.index = default_index(len(result))\n return result", + "docstring": "Return a random sample of items from an axis of object. You can use for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with . Default = 1 if = None. frac : float, optional Fraction of axis items to return. Cannot be used with . replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default `SeriesNonenfracreplacementTruerandom_statereplaceTruefracnum_specimen_seen` column are more likely to be sampled. 
>>> df.sample(n=2, weights=\"num_specimen_seen\", random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:sample arg:self arg:n arg:frac arg:replace arg:weights arg:random_state arg:axis arg:ignore_index arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Call Assign Assign Call Assign Call If Compare Compare Assign Call If Compare Assign Call Assign Call Assign Call If Assign Call Call Return return:yes" + }, + { + "library": "pygame", + "name": "array_blue", + "source_code": "def array_blue(surface):\n size = surface.get_size()\n array = numpy.empty(size, numpy.uint8)\n surface_to_array(array, surface, 'B')\n return array", + "docstring": "pygame.surfarray.array_blue(Surface): return array copy pixel blue into a 2d array Copy the pixel blue values from a Surface into a 2D array. This will work for any type of Surface format. This function will temporarily lock the Surface as pixels are copied (see the Surface.lock - lock the Surface memory for pixel access method).", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:array_blue arg:surface arguments arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "create_authorization_url", + "source_code": "def create_authorization_url(self, url, state=None, code_verifier=None, **kwargs):\n if state is None:\n state = generate_token()\n response_type = self.metadata.get('response_type', 'code')\n response_type = kwargs.pop('response_type', response_type)\n if 'redirect_uri' not in kwargs:\n kwargs['redirect_uri'] = self.redirect_uri\n if 'scope' not in kwargs:\n kwargs['scope'] = self.scope\n if code_verifier and response_type == 'code' and (self.code_challenge_method == 'S256'):\n kwargs['code_challenge'] = create_s256_code_challenge(code_verifier)\n kwargs['code_challenge_method'] = self.code_challenge_method\n for k in self.EXTRA_AUTHORIZE_PARAMS:\n if k not in kwargs and k in self.metadata:\n kwargs[k] = self.metadata[k]\n uri = prepare_grant_uri(url, client_id=self.client_id, response_type=response_type, state=state, **kwargs)\n return (uri, state)", + "docstring": "Generate an authorization URL and state. :param url: Authorization endpoint url, must be HTTPS. :param state: An optional state string for CSRF protection. If not given it will be generated for you. :param code_verifier: An optional code_verifier for code challenge. :param kwargs: Extra parameters to include. :return: authorization_url, state", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\client.py", + "ast_data": "FunctionDef name:create_authorization_url arg:self arg:url arg:state arg:code_verifier arguments arg arg arg arg arg If Compare Assign Call Assign Call Assign Call If Compare Assign If Compare Assign If BoolOp Compare Compare Assign Call Assign For If BoolOp Compare Compare Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "record", + "source_code": "def record():\n if step is None:\n raise ValueError('No step set. 
Please specify one either through the `step` argument or through tf.summary.experimental.set_step()')\n with ops.device('cpu:0'):\n summary_tensor = tensor() if callable(tensor) else array_ops.identity(tensor)\n writer = _summary_state.writer\n summary_value = _maybe_convert_tensor_to_dtensor(writer, summary_tensor)\n step_value = _maybe_convert_tensor_to_dtensor(writer, step)\n write_summary_op = gen_summary_ops.write_summary(writer._resource, step_value, summary_value, tag, serialized_metadata, name=scope)\n with ops.control_dependencies([write_summary_op]):\n return constant_op.constant(True)", + "docstring": "Record the actual summary and return True.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:record arguments If Compare Raise Call With Call Assign Call Call Call Assign Assign Call Assign Call Assign Call With Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_is_compiled", + "source_code": "def _is_compiled() -> bool:\n return torch._C._mtia_isBuilt()", + "docstring": "Return true if compiled with MTIA support.", + "type": "function", + "file_path": "pytorch\\torch\\mtia\\__init__.py", + "ast_data": "FunctionDef name:_is_compiled arguments Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ExitResult", + "source_code": "def ExitResult(self, result):\n if self._outer_context:\n\n def fn(x):\n self._outer_context.AddName(x.name)\n return x\n nest.map_structure(fn, result, expand_composites=True)", + "docstring": "Make a list of tensors available in the outer context.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:ExitResult arg:self arg:result arguments arg arg If FunctionDef name:fn arg:x arguments arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_arg_min_flops", + "source_code": "@ops.RegisterStatistics('ArgMin', 'flops')\ndef _arg_min_flops(graph, node):\n return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)", + "docstring": "Compute flops for ArgMin operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_arg_min_flops arg:graph arg:node arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "Dropout", + "source_code": "class Dropout(keras_layers.Dropout, base.Layer):\n\n def __init__(self, rate=0.5, noise_shape=None, seed=None, name=None, **kwargs):\n super(Dropout, self).__init__(rate=rate, noise_shape=noise_shape, seed=seed, name=name, **kwargs)\n\n def call(self, inputs, training=False):\n return super(Dropout, self).call(inputs, training=training)", + "docstring": "Applies Dropout to the input. Dropout consists in randomly setting a fraction of input units to 0 at each update during training time, which helps prevent overfitting. The units that are kept are scaled by , so that their sum is unchanged at training time and inference time. Args: rate: The dropout rate, between 0 and 1. E.g. would drop out 10% of input units. noise_shape: 1D tensor of type representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape , and you want the dropout mask to be the same for all timesteps, you can use . seed: A Python integer. Used to create random seeds. See . for behavior. 
name: The name of the layer (string).", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\core.py", + "ast_data": "ClassDef name:Dropout FunctionDef name:__init__ arg:self arg:rate arg:noise_shape arg:seed arg:name arguments arg arg arg arg arg arg Call Call FunctionDef name:call arg:self arg:inputs arg:training arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_get_trimming_maximums", + "source_code": "def _get_trimming_maximums(rn, cn, max_elements, max_rows=None, max_cols=None, scaling_factor: float=0.8) -> tuple[int, int]:\n\n def scale_down(rn, cn):\n if cn >= rn:\n return (rn, int(cn * scaling_factor))\n else:\n return (int(rn * scaling_factor), cn)\n if max_rows:\n rn = max_rows if rn > max_rows else rn\n if max_cols:\n cn = max_cols if cn > max_cols else cn\n while rn * cn > max_elements:\n rn, cn = scale_down(rn, cn)\n return (rn, cn)", + "docstring": "Recursively reduce the number of rows and columns to satisfy max elements. Parameters ---------- rn, cn : int The number of input rows / columns max_elements : int The number of allowable elements max_rows, max_cols : int, optional Directly specify an initial maximum rows or columns before compression. scaling_factor : float Factor at which to reduce the number of rows / columns to fit. Returns ------- rn, cn : tuple New rn and cn values that satisfy the max_elements constraint", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_get_trimming_maximums arg:rn arg:cn arg:max_elements arg:max_rows arg:max_cols arg:scaling_factor arguments arg arg arg arg arg arg FunctionDef name:scale_down arg:rn arg:cn arguments arg arg If Compare Return return:yes Call Return return:yes Call If Assign Compare If Assign Compare While Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "mark_as_return", + "source_code": "def mark_as_return(outputs, acd):\n\n def _mark_as_return(tensor):\n if not tensor_util.is_tf_type(tensor):\n return tensor\n return_tensor = acd.mark_as_return(tensor)\n if getattr(tensor, '_keras_mask', None) is not None:\n return_tensor._keras_mask = acd.mark_as_return(tensor._keras_mask)\n else:\n return_tensor._keras_mask = None\n if getattr(tensor, '_tfp_distribution', None) is not None:\n return_tensor._tfp_distribution = tensor._tfp_distribution\n return return_tensor\n return nest.map_structure(_mark_as_return, outputs)", + "docstring": "Marks as the return values for automatic control deps.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py", + "ast_data": "FunctionDef name:mark_as_return arg:outputs arg:acd arguments arg arg FunctionDef name:_mark_as_return arg:tensor arguments arg If Call Return return:yes Assign Call If Compare Call Assign Call Assign If Compare Call Assign Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__enter__", + "source_code": "def __enter__(self):\n return self", + "docstring": "Make usable with \"with\" statement.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "copy_from", + "source_code": "def copy_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.copy_from(other.parent)\n 
self.isolated_names = copy.copy(other.isolated_names)\n self.modified = copy.copy(other.modified)\n self.read = copy.copy(other.read)\n self.deleted = copy.copy(other.deleted)\n self.bound = copy.copy(other.bound)\n self.annotations = copy.copy(other.annotations)\n self.params = copy.copy(other.params)", + "docstring": "Recursively copies the contents of this scope from another scope.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\activity.py", + "ast_data": "FunctionDef name:copy_from arg:self arg:other arguments arg arg If Compare Compare Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "_get_orig_buffer_dtypes", + "source_code": "@no_type_check\ndef _get_orig_buffer_dtypes(state: _FSDPState, buffer_names: list[str]) -> list[torch.dtype]:\n buffer_dtypes: list[torch.dtype] = []\n for buffer_name in buffer_names:\n _p_assert(buffer_name in state._buffer_name_to_orig_dtype, f'{buffer_name} is missing from pre-computed dict on rank {state.rank}, which only has keys {state._buffer_name_to_orig_dtype.keys()}')\n buffer_dtypes.append(state._buffer_name_to_orig_dtype[buffer_name])\n return buffer_dtypes", + "docstring": "Returns the original buffer types of the given buffer names.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_get_orig_buffer_dtypes arg:state arg:buffer_names arguments arg arg For Call Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_var_to_tensor", + "source_code": "def _var_to_tensor(var, dtype=None, name=None, as_ref=False):\n del name\n if dtype is not None and (not dtype.is_compatible_with(var.dtype)):\n raise ValueError('Incompatible type conversion requested to type {!r} for variable of type {!r}'.format(dtype.name, var.dtype.name))\n if as_ref:\n raise NotImplementedError(\"ShardedVariable doesn't support being used as a reference.\")\n if 'embedding_lookup' in ops.get_name_scope():\n raise TypeError('Converting ShardedVariable to tensor in embedding lookup ops is disallowed.')\n return array_ops.concat(var.variables, axis=0)", + "docstring": "Converts a to a .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py", + "ast_data": "FunctionDef name:_var_to_tensor arg:var arg:dtype arg:name arg:as_ref arguments arg arg arg arg If BoolOp Compare Call Raise Call Call If Raise Call If Compare Call Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_checked_labels", + "source_code": "def get_checked_labels(self):\n return [l.get_text() for l, box_checked in zip(self.labels, self.get_status()) if box_checked]", + "docstring": "Return a list of labels currently checked by user.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:get_checked_labels arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "to_pytimedelta", + "source_code": "def to_pytimedelta(self) -> npt.NDArray[np.object_]:\n return ints_to_pytimedelta(self._ndarray)", + "docstring": "Return an ndarray of datetime.timedelta objects. Returns ------- numpy.ndarray A NumPy `` objects, ensuring high precision for time-based calculations. See Also -------- to_timedelta : Convert argument to timedelta format. Timedelta : Represents a duration between two dates or times. 
DatetimeIndex: Index of datetime64 data. Timedelta.components : Return a components namedtuple-like of a single timedelta. Examples -------- >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit=\"D\") >>> tdelta_idx TimedeltaIndex(['1 days', '2 days', '3 days'], dtype='timedelta64[ns]', freq=None) >>> tdelta_idx.to_pytimedelta() array([datetime.timedelta(days=1), datetime.timedelta(days=2), datetime.timedelta(days=3)], dtype=object) >>> tidx = pd.TimedeltaIndex(data=[\"1 days 02:30:45\", \"3 days 04:15:10\"]) >>> tidx TimedeltaIndex(['1 days 02:30:45', '3 days 04:15:10'], dtype='timedelta64[ns]', freq=None) >>> tidx.to_pytimedelta() array([datetime.timedelta(days=1, seconds=9045), datetime.timedelta(days=3, seconds=15310)], dtype=object)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\timedeltas.py", + "ast_data": "FunctionDef name:to_pytimedelta arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, leftover_dependency_map):\n self.leftover_dependency_map = leftover_dependency_map\n super(CyclicDependencyError, self).__init__()", + "docstring": "Creates a CyclicDependencyException.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\trackable_utils.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:leftover_dependency_map arguments arg arg Assign Call Call" + }, + { + "library": "pytorch", + "name": "Hardshrink", + "source_code": "class Hardshrink(Module):\n __constants__ = ['lambd']\n lambd: float\n\n def __init__(self, lambd: float=0.5) -> None:\n super().__init__()\n self.lambd = lambd\n\n def forward(self, input: Tensor) -> Tensor:\n return F.hardshrink(input, self.lambd)\n\n def extra_repr(self) -> str:\n return f'{self.lambd}'", + "docstring": "Applies the Hard Shrinkage (Hardshrink) function element-wise. Hardshrink is defined as: .. math:: \\text{HardShrink}(x) = \\begin{cases} x, & \\text{ if } x > \\lambda \\\\ x, & \\text{ if } x >> m = nn.Hardshrink() >>> input = torch.randn(2) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:Hardshrink Assign FunctionDef name:__init__ arg:self arg:lambd arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_epsilon", + "source_code": "def set_epsilon(value):\n global _EPSILON\n _EPSILON = value", + "docstring": "Sets the value of the fuzz factor used in numeric expressions. Args: value: float. New value of epsilon. 
Example: >>> tf.keras.backend.epsilon() 1e-07 >>> tf.keras.backend.set_epsilon(1e-5) >>> tf.keras.backend.epsilon() 1e-05 >>> tf.keras.backend.set_epsilon(1e-7)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend_config.py", + "ast_data": "FunctionDef name:set_epsilon arg:value arguments arg Assign" + }, + { + "library": "pandas", + "name": "_str_escape", + "source_code": "def _str_escape(x, escape):\n if isinstance(x, str):\n if escape == 'html':\n return escape_html(x)\n elif escape == 'latex':\n return _escape_latex(x)\n elif escape == 'latex-math':\n return _escape_latex_math(x)\n else:\n raise ValueError(f\"`escape` only permitted in {{'html', 'latex', 'latex-math'}}, got {escape}\")\n return x", + "docstring": "if escaping: only use on str, else return input", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_str_escape arg:x arg:escape arguments arg arg If Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_WeakTensorIterator", + "source_code": "class _WeakTensorIterator(object):\n __slots__ = ['_weak_tensor', '_index', '_limit']\n\n def __init__(self, weak_tensor, dim0):\n self._weak_tensor = weak_tensor\n self._index = 0\n self._limit = dim0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self._index == self._limit:\n raise StopIteration\n result = WeakTensor.from_tensor(self._weak_tensor.tensor[self._index])\n self._index += 1\n return result", + "docstring": "Iterates over the leading dim of a WeakTensor. Performs no error checks.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py", + "ast_data": "ClassDef name:_WeakTensorIterator Assign FunctionDef name:__init__ arg:self arg:weak_tensor arg:dim0 arguments arg arg arg Assign Assign Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg If Compare Raise Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_propagate_tensor_meta_non_cached", + "source_code": "def _propagate_tensor_meta_non_cached(self, op_schema: OpSchema) -> Union[None, TensorMeta, Sequence[Optional[TensorMeta]]]:\n if op_schema.op == aten.equal.default:\n return None\n with FakeTensorMode():\n fake_args = op_schema.gen_fake_args()\n fake_kwargs = op_schema.gen_fake_kwargs()\n fake_out = op_schema.op(*fake_args, **fake_kwargs)\n if isinstance(fake_out, torch.Tensor):\n return TensorMeta(shape=fake_out.shape, stride=fake_out.stride(), dtype=fake_out.dtype)\n elif isinstance(fake_out, (tuple, list)):\n tensor_meta_list: list[Optional[TensorMeta]] = []\n for fake_out_item in fake_out:\n if isinstance(fake_out_item, torch.Tensor):\n tensor_meta_list.append(TensorMeta(shape=fake_out_item.shape, stride=fake_out_item.stride(), dtype=fake_out_item.dtype))\n else:\n tensor_meta_list.append(None)\n return tuple(tensor_meta_list) if isinstance(fake_out, tuple) else tensor_meta_list\n else:\n return None", + "docstring": "Propagate the tensor metadata, it could either return a TensorMeta or a list/tuple of TensorMetas", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_sharding_prop.py", + "ast_data": "FunctionDef name:_propagate_tensor_meta_non_cached arg:self arg:op_schema arguments arg arg If Compare Return return:no With Call Assign Call Assign Call Assign Call If 
Call Return return:yes Call Call If Call For If Call Call Call Call Call Return return:yes Call Call Return return:no" + }, + { + "library": "scipy", + "name": "hfft2", + "source_code": "@_dispatch\ndef hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, plan=None):\n return (Dispatchable(x, np.ndarray),)", + "docstring": "Compute the 2-D FFT of a Hermitian complex array. Parameters ---------- x : array Input array, taken to be Hermitian complex. s : sequence of ints, optional Shape of the real output. axes : sequence of ints, optional Axes over which to compute the FFT. norm : {\"backward\", \"ortho\", \"forward\"}, optional Normalization mode (see ). Default is \"backward\". overwrite_x : bool, optional If True, the contents of can be destroyed; the default is False. See for more details. workers : int, optional Maximum number of workers to use for parallel computation. If negative, the value wraps around from `~scipy.fft.ffthfftnhfftn`. Examples -------- >>> import scipy.fft >>> import numpy as np >>> x = np.array([[1+0j, 2+0j], [2+0j, 1+0j]]) # Hermitian-symmetric input >>> scipy.fft.hfft2(x, s=(2, 2)) array([[ 6., 0.], [ 0., -2.]])", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_basic.py", + "ast_data": "FunctionDef name:hfft2 arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_initialized", + "source_code": "def is_initialized():\n return _initialized and (not _is_in_bad_fork())", + "docstring": "Return whether PyTorch's CUDA state has been initialized.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:is_initialized arguments Return return:yes BoolOp Call" + }, + { + "library": "tensorflow", + "name": "call_with_layout", + "source_code": "@tf_export('experimental.dtensor.call_with_layout', v1=[])\ndef call_with_layout(fn: Callable[..., Any], layout: Optional[layout_lib.Layout], *args, **kwargs) -> Any:\n if layout is not None:\n if context.executing_eagerly():\n with default_mesh(layout.mesh):\n with _dtensor_device()._default_layout(layout):\n return fn(*args, **kwargs)\n else:\n return relayout(fn(*args, **kwargs), layout)\n return fn(*args, **kwargs)", + "docstring": "Calls a function in the DTensor device scope if is not None. If is not None, consumes DTensor(s) as input and produces a DTensor as output; a DTensor is a tf.Tensor with layout-related attributes. If is None, consumes and produces regular tf.Tensors. Args: fn: A supported TF API function such as tf.zeros. layout: Optional, the layout of the output DTensor. *args: Arguments given to . **kwargs: Keyword arguments given to . 
Returns: The return value of transformed to a DTensor if requested.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py", + "ast_data": "FunctionDef name:call_with_layout arg:fn arg:layout arguments arg arg arg arg If Compare If Call With Call With Call Call Return return:yes Call Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "TritonSymbols", + "source_code": "class TritonSymbols:\n reduction_types = OrderedSet([SymT.R0_INDEX, SymT.R1_INDEX])\n block_types = OrderedSet([SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, *reduction_types])\n block_offsets = {symt: sympy.Symbol(f'{prefix_str[symt]}offset', integer=True, nonnegative=True) for symt in block_types}\n block_sizes = {symt: sympy.Symbol(f'{prefix_str[symt].upper()}BLOCK', integer=True, positive=True) for symt in block_types}\n\n @classmethod\n def get_block_size(cls, tree: IterationRanges) -> sympy.Symbol:\n return cls.block_sizes[tree.symt]\n\n @classmethod\n def get_block_offset(cls, tree: IterationRanges) -> sympy.Symbol:\n return cls.block_offsets[tree.symt]", + "docstring": "Stores sympy.Symbol instances and constants associated with triton codegen.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", + "ast_data": "ClassDef name:TritonSymbols Assign Call Assign Call Assign Call Assign Call Call FunctionDef name:get_block_size arg:cls arg:tree arguments arg arg Return return:yes FunctionDef name:get_block_offset arg:cls arg:tree arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "decorated", + "source_code": "@parameterized.named_parameters(*params)\n@functools.wraps(f)\ndef decorated(self, run_mode, *args, **kwargs):\n if run_mode == 'v1_session':\n _v1_session_test(f, self, config, *args, **kwargs)\n elif run_mode == 'v2_eager':\n _v2_eager_test(f, self, *args, **kwargs)\n elif run_mode == 'v2_function':\n _v2_function_test(f, self, *args, **kwargs)\n else:\n return ValueError('Unknown run mode %s' % run_mode)", + "docstring": "A run of a single test case w/ specified run mode.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py", + "ast_data": "FunctionDef name:decorated arg:self arg:run_mode arguments arg arg arg arg If Compare Call If Compare Call If Compare Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_weighted_flat_map", + "source_code": "def _weighted_flat_map(input_datasets: Sequence[dataset_ops.DatasetV2], weights: Optional[Sequence[Union[float, tensor.Tensor]]]=None, name: Optional[str]=None) -> dataset_ops.DatasetV2:\n return _WeightedFlatMap(input_datasets, weights, name=name)", + "docstring": "A that fetches elements from and flattens them. This operation combines elements from multiple datasets into a flattened dataset. Elements are read in proportion to the assigned to each input dataset. All requested elements from a dataset are read before reading the elements from the next dataset. For example, suppose we have 2 datasets: # TODO(wilsin): Make the following code testable after the API is released. 
dataset1 = tf.data.Dataset.range(0, 10) dataset2 = tf.data.Dataset.range(10, 20), Suppose that we call from these 2 datasets with the following weights: dataset = tf.data.Dataset.weighted_flat_map([dataset1, dataset2], [0.5, 1.0]) Then, the outcome of the elements is: # [0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] Args: input_datasets: A non-empty list of objects with compatible structure. weights: (Optional.) A list or Tensor of non-zero floating-point values where represents the probability to sample from , or a object where each element is such a list. Defaults to a uniform distribution across . name: (Optional.) A name for the tf.data operation. Returns: A dataset that reads elements from all its inputs, reading the requested elements from an input according to the weight before proceeding to the next input. The number of elements read from an input is in proportion to its weight given in . Raises: TypeError: if the or arguments have the wrong type. ValueError: - if has less than 2 datasets. - if is specified and does not match the length of . InvalidArgumentError: - if any of the has an unknown or infinite cardinality. - if any of the has a value that is less than or equal to 0.0", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\weighted_flat_map_op.py", + "ast_data": "FunctionDef name:_weighted_flat_map arg:input_datasets arg:weights arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "pad_to_cardinality", + "source_code": "@tf_export('data.experimental.pad_to_cardinality')\ndef pad_to_cardinality(cardinality, mask_key='valid'):\n\n def make_filler_dataset(ds):\n padding = cardinality - ds.cardinality()\n filler_element = nest.map_structure(lambda spec: array_ops.zeros(spec.shape, spec.dtype), ds.element_spec)\n filler_element[mask_key] = False\n filler_dataset = dataset_ops.Dataset.from_tensors(filler_element)\n filler_dataset = filler_dataset.repeat(padding)\n return filler_dataset\n\n def apply_valid_mask(x):\n x[mask_key] = True\n return x\n\n def _apply_fn(dataset):\n if context.executing_eagerly():\n if dataset.cardinality() < 0:\n raise ValueError(f'The dataset passed into `pad_to_cardinality` must have a known cardinalty, but has cardinality {dataset.cardinality()}')\n if dataset.cardinality() > cardinality:\n raise ValueError(f'The dataset passed into `pad_to_cardinality` must have a cardinalty less than the target cardinality ({cardinality}), but has cardinality {dataset.cardinality()}')\n if not isinstance(dataset.element_spec, Mapping):\n raise ValueError('`pad_to_cardinality` requires its input dataset to be a dictionary.')\n filler = make_filler_dataset(dataset)\n dataset = dataset.map(apply_valid_mask)\n dataset = dataset.concatenate(filler)\n return dataset\n return _apply_fn", + "docstring": "Pads a dataset with fake elements to reach the desired cardinality. The dataset to pad must have a known and finite cardinality and contain dictionary elements. The will be added to differentiate between real and padding elements -- real elements will have a entry while padding elements will have a entry. Example usage: ds = tf.data.Dataset.from_tensor_slices({'a': [1, 2]}) ds = ds.apply(tf.data.experimental.pad_to_cardinality(3)) list(ds.as_numpy_iterator()) [{'a': 1, 'valid': True}, {'a': 2, 'valid': True}, {'a': 0, 'valid': False}] This can be useful, e.g. during eval, when partial batches are undesirable but it is also important not to drop any data. 
Args: cardinality: The cardinality to pad the dataset to. mask_key: The key to use for identifying real vs padding elements. Returns: A dataset transformation that can be applied via .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\pad_to_cardinality.py", + "ast_data": "FunctionDef name:pad_to_cardinality arg:cardinality arg:mask_key arguments arg arg FunctionDef name:make_filler_dataset arg:ds arguments arg Assign Call Assign Call arguments arg Call Assign Assign Call Assign Call Return return:yes FunctionDef name:apply_valid_mask arg:x arguments arg Assign Return return:yes FunctionDef name:_apply_fn arg:dataset arguments arg If Call If Compare Call Raise Call Call If Compare Call Raise Call Call If Call Raise Call Assign Call Assign Call Assign Call Return return:yes Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "decode", + "source_code": "def decode(self, doc):\n if self.input == 'filename':\n with open(doc, 'rb') as fh:\n doc = fh.read()\n elif self.input == 'file':\n doc = doc.read()\n if isinstance(doc, bytes):\n doc = doc.decode(self.encoding, self.decode_error)\n if doc is np.nan:\n raise ValueError('np.nan is an invalid document, expected byte or unicode string.')\n return doc", + "docstring": "Decode the input into a string of unicode symbols. The decoding strategy depends on the vectorizer parameters. Parameters ---------- doc : bytes or str The string to decode. Returns ------- doc: str A string of unicode symbols.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:decode arg:self arg:doc arguments arg arg If Compare With Call Assign Call If Compare Assign Call If Call Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_onednn_backend_config", + "source_code": "def get_onednn_backend_config() -> BackendConfig:\n return BackendConfig('onednn').set_backend_pattern_configs(conv_configs).set_backend_pattern_configs(linear_configs).set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)).set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)).set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)).set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)).set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)).set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)).set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)).set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)).set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))", + "docstring": "Return the for PyTorch's native ONEDNN backend.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py", + "ast_data": "FunctionDef name:get_onednn_backend_config arguments Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_get_executor_init", + "source_code": "def _get_executor_init(self, workers):\n\n def pool_fn(seqs):\n pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, None, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n return pool_fn", + "docstring": "Gets the Pool initializer for multiprocessing. 
Args: workers: Number of workers. Returns: Function, a Function to initialize the pool", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", + "ast_data": "FunctionDef name:_get_executor_init arg:self arg:workers arguments arg arg FunctionDef name:pool_fn arg:seqs arguments arg Assign Call Call Call Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "desc", + "source_code": "def desc(self):\n config = self.config()\n config_str = '_'.join([str(x) for x in config])\n device = self.device\n if 'NNC_NUM_THREADS' in os.environ:\n num_threads_str = os.environ['NNC_NUM_THREADS']\n device += num_threads_str\n return f'{self.engine.mode}: {self.module()}_{self.mode}_{device}_{config_str}'", + "docstring": "return the description of the current benchmark", + "type": "method", + "file_path": "pytorch\\benchmarks\\tensorexpr\\benchmark.py", + "ast_data": "FunctionDef name:desc arg:self arguments arg Assign Call Assign Call Call Assign If Compare Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, name, func, create_scope_now=False, custom_getter=None, create_graph_function=False):\n if not context.executing_eagerly():\n raise RuntimeError('{} objects can only be used when eager execution is enabled, use tf.Template for graph construction'.format(type(self)))\n super(EagerTemplate, self).__init__(name, func, create_scope_now, None, custom_getter, create_graph_function)\n if self._variable_scope is not None:\n variable_scope_name = self._variable_scope.name\n else:\n variable_scope_name = None\n self._template_store = _EagerTemplateVariableStore(variable_scope_name)\n self._variable_scope_context_manager = None", + "docstring": "Creates a template for the given function. Args: name: A name for the scope created by this template. The name will be made unique by appending to the it (see how treats the for details). func: The function to apply each time. create_scope_now: Whether to create the scope at Template construction time, rather than first call. Defaults to false. Creating the scope at construction time may be more convenient if the template is passed through much lower level code, and you want to be sure of the scope name without knowing exactly where it will be first called. If set to True, the scope will be created in the constructor, and all subsequent times in , leading to a trailing numeral being added to the names of all created Tensors. If set to False, the scope will be created at the first call location. custom_getter: optional custom getter to pass to create_graph_function: When True, will be executed as a graph function. Enabling this flag allows the caller to reap the performance benefits associated with executing graphs, at the cost of sacrificing debuggability; however, not all Python functions can be compiled into graph functions. See the documentation for for details. 
Raises: RuntimeError: if eager execution is not enabled.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:func arg:create_scope_now arg:custom_getter arg:create_graph_function arguments arg arg arg arg arg arg If Call Raise Call Call Call Call Call If Compare Assign Assign Assign Call Assign" + }, + { + "library": "pytorch", + "name": "_check_flat_params_on_expected_device", + "source_code": "def _check_flat_params_on_expected_device(state: _FSDPState, module: nn.Module):\n cpu_device = torch.device('cpu')\n for handle in traversal_utils._get_fsdp_handles(module):\n if not handle._offload_params and handle.flat_param.device != state.compute_device:\n raise RuntimeError(f'An FSDP-managed module unexpectedly has parameters on {handle.flat_param.device}. Make sure to move the module to {state.compute_device} before training.')\n elif handle._offload_params and handle.flat_param.device != cpu_device:\n raise RuntimeError(f'An FSDP-managed module with parameter CPU offloading enabled has parameters on {handle.flat_param.device}. Make sure to not move the module from CPU when offloading parameters.')", + "docstring": "Checks that all `` are on the expected device for *lazy initialization*.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_check_flat_params_on_expected_device arg:state arg:module arguments arg arg Assign Call For Call If BoolOp Compare Raise Call If BoolOp Compare Raise Call" + }, + { + "library": "seaborn", + "name": "z_score", + "source_code": "@staticmethod\ndef z_score(data2d, axis=1):\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n if axis == 1:\n return z_scored\n else:\n return z_scored.T", + "docstring": "Standarize the mean and variance of the data axis Parameters ---------- data2d : pandas.DataFrame Data to normalize axis : int Which axis to normalize across. If 0, normalize across rows, if 1, normalize across columns. Returns ------- normalized : pandas.DataFrame Noramlized data with a mean of 0 and variance of 1 across the specified axis.", + "type": "method", + "file_path": "seaborn\\seaborn\\matrix.py", + "ast_data": "FunctionDef name:z_score arg:data2d arg:axis arguments arg arg If Compare Assign Assign Assign Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_handle_failure_and_recovery", + "source_code": "def _handle_failure_and_recovery(self, e, on_failure_fn, on_transient_failure_fn, on_recovery_fn, worker_device_name):\n if on_failure_fn:\n on_failure_fn(e)\n with self._cluster_update_lock:\n self._cluster_due_for_update_or_finish.set()\n self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC)\n if self._error_from_recovery:\n try:\n raise self._error_from_recovery\n finally:\n self._error_from_recovery = None\n logging.info('Worker %s has been recovered.', worker_device_name)\n if on_recovery_fn:\n logging.info('Worker %s calling on_recovery_fn', worker_device_name)\n with self.wait_on_failure(on_recovery_fn=on_recovery_fn, on_transient_failure_fn=on_transient_failure_fn, worker_device_name=worker_device_name):\n on_recovery_fn()", + "docstring": "Call failure fn, wait for cluster to recover, then call recovery fn. Args: e: the Exception thrown during closure execution. on_failure_fn: an optional function to run if preemption happens. 
on_transient_failure_fn: an optional function to run if transient failure happens. on_recovery_fn: an optional function to run when a worker is recovered from preemption. worker_device_name: the device name of the worker instance that is passing through the failure.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:_handle_failure_and_recovery arg:self arg:e arg:on_failure_fn arg:on_transient_failure_fn arg:on_recovery_fn arg:worker_device_name arguments arg arg arg arg arg arg If Call With Call Call If Try Raise Assign Call If Call With Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, metric_name, metric_methods, label_length, *args):\n self._metric_name = metric_name\n self._metric_methods = metric_methods\n self._label_length = label_length\n if label_length >= len(self._metric_methods):\n raise ValueError('Cannot create {} metric with label >= {}'.format(self._metric_name, len(self._metric_methods)))\n self._metric = self._metric_methods[self._label_length].create(*args)", + "docstring": "Creates a new metric. Args: metric_name: name of the metric class. metric_methods: list of swig metric methods. label_length: length of label args. *args: the arguments to call create method.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:metric_name arg:metric_methods arg:label_length arguments arg arg arg arg arg Assign Assign Assign If Compare Call Raise Call Call Call Assign Call" + }, + { + "library": "tensorflow", + "name": "categorical", + "source_code": "@tf_export('random.categorical')\n@dispatch.add_dispatch_support\ndef categorical(logits, num_samples, dtype=None, seed=None, name=None):\n with ops.name_scope(name, 'categorical', [logits]):\n return multinomial_categorical_impl(logits, num_samples, dtype, seed)", + "docstring": "Draws samples from a categorical distribution. Example: Args: logits: 2-D Tensor with shape . Each slice represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. dtype: The integer type of the output: or . Defaults to . seed: A Python integer. Used to create a random seed for the distribution. See for behavior. name: Optional name for the operation. 
Returns: The drawn samples of shape .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py", + "ast_data": "FunctionDef name:categorical arg:logits arg:num_samples arg:dtype arg:seed arg:name arguments arg arg arg arg arg With Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "center", + "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_just_dispatcher)\ndef center(a, width, fillchar=' '):\n width = np.asanyarray(width)\n if not np.issubdtype(width.dtype, np.integer):\n raise TypeError(f\"unsupported type {width.dtype} for operand 'width'\")\n a = np.asanyarray(a)\n fillchar = np.asanyarray(fillchar)\n if np.any(str_len(fillchar) != 1):\n raise TypeError('The fill character must be exactly one character long')\n if np.result_type(a, fillchar).char == 'T':\n return _center(a, width, fillchar)\n fillchar = fillchar.astype(a.dtype, copy=False)\n width = np.maximum(str_len(a), width)\n out_dtype = f'{a.dtype.char}{width.max()}'\n shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape)\n out = np.empty_like(a, shape=shape, dtype=out_dtype)\n return _center(a, width, fillchar, out=out)", + "docstring": "Return a copy of with its elements centered in a string of length . Parameters ---------- a : array-like, with ``width >> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], dtype='>> np.strings.center(c, width=9, fillchar='*') array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype=' Request:\n if isinstance(url, parsel.Selector):\n url = _url_from_selector(url)\n elif isinstance(url, parsel.SelectorList):\n raise ValueError('SelectorList is not supported')\n encoding = self.encoding if encoding is None else encoding\n return super().follow(url=url, callback=callback, method=method, headers=headers, body=body, cookies=cookies, meta=meta, encoding=encoding, priority=priority, dont_filter=dont_filter, errback=errback, cb_kwargs=cb_kwargs, flags=flags)", + "docstring": "Return a :class: instance to follow a link `~scrapy.link.Linktopics-link-extractors~scrapy.Selector element, e.g. `~scrapy.Selectorresponse-follow-example` for usage examples.", + "type": "method", + "file_path": "scrapy\\scrapy\\http\\response\\text.py", + "ast_data": "FunctionDef name:follow arg:self arg:url arg:callback arg:method arg:headers arg:body arg:cookies arg:meta arg:encoding arg:priority arg:dont_filter arg:errback arg:cb_kwargs arg:flags arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Call Assign Call If Call Raise Call Assign Compare Return return:yes Call Call" + }, + { + "library": "django", + "name": "InputStreamExhausted", + "source_code": "class InputStreamExhausted(Exception):\n pass", + "docstring": "No more reads are allowed from this device.", + "type": "class", + "file_path": "django\\django\\http\\multipartparser.py", + "ast_data": "ClassDef name:InputStreamExhausted" + }, + { + "library": "tensorflow", + "name": "key", + "source_code": "@property\ndef key(self):\n alg = self.algorithm\n if alg in (a.value for a in random_ops_util.Algorithm):\n return self._state_var[-1]\n else:\n raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))", + "docstring": "The 'key' part of the state of a counter-based RNG. 
For a counter-base RNG algorithm such as Philox and ThreeFry (as described in paper 'Parallel Random Numbers: As Easy as 1, 2, 3' [ the RNG state consists of two parts: counter and key. The output is generated via the formula: output=hash(key, counter), i.e. a hashing of the counter parametrized by the key. Two RNGs with two different keys can be thought as generating two independent random-number streams (a stream is formed by increasing the counter). Returns: A scalar which is the 'key' part of the state, if the RNG algorithm is counter-based; otherwise it raises a ValueError.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:key arg:self arguments arg Assign If Compare Return return:yes Raise Call Call" + }, + { + "library": "tensorflow", + "name": "_prune_invalid_weights", + "source_code": "def _prune_invalid_weights(sparse_ids, sparse_weights):\n if sparse_weights is not None:\n is_weights_valid = math_ops.greater(sparse_weights.values, 0)\n sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)\n sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)\n return (sparse_ids, sparse_weights)", + "docstring": "Prune invalid weights (< 0) from the input ids and weights.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\embedding_ops.py", + "ast_data": "FunctionDef name:_prune_invalid_weights arg:sparse_ids arg:sparse_weights arguments arg arg If Compare Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "normalize_table_name", + "source_code": "def normalize_table_name(self, table_name):\n return re.sub('[^a-zA-Z0-9]', '', table_name.title())", + "docstring": "Translate the table name to a Python-compatible model name.", + "type": "method", + "file_path": "django\\django\\core\\management\\commands\\inspectdb.py", + "ast_data": "FunctionDef name:normalize_table_name arg:self arg:table_name arguments arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_get_imagesave_wildcards", + "source_code": "def _get_imagesave_wildcards(self):\n default_filetype = self.get_default_filetype()\n filetypes = self.get_supported_filetypes_grouped()\n sorted_filetypes = sorted(filetypes.items())\n wildcards = []\n extensions = []\n filter_index = 0\n for i, (name, exts) in enumerate(sorted_filetypes):\n ext_list = ';'.join(['*.%s' % ext for ext in exts])\n extensions.append(exts[0])\n wildcard = f'{name} ({ext_list})|{ext_list}'\n if default_filetype in exts:\n filter_index = i\n wildcards.append(wildcard)\n wildcards = '|'.join(wildcards)\n return (wildcards, extensions, filter_index)", + "docstring": "Return the wildcard string for the filesave dialog.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py", + "ast_data": "FunctionDef name:_get_imagesave_wildcards arg:self arguments arg Assign Call Assign Call Assign Call Call Assign Assign Assign For Call Assign Call Call Assign If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_placeholders", + "source_code": "def _create_placeholders(args, kwargs, arg_names=None):\n signature_context = trace_type.InternalTracingContext(is_legacy_signature=True)\n arg_trace_types = trace_type.from_value(tuple(args), signature_context)\n kwarg_trace_types = trace_type.from_value(kwargs, signature_context)\n placeholder_mapping = 
signature_context.get_placeholder_mapping()\n placeholder_context = trace_type.InternalPlaceholderContext(ops.get_default_graph(), placeholder_mapping)\n if arg_names is None:\n arg_names = [None] * len(arg_trace_types.components)\n func_args = []\n for name, trace_type_arg in zip(arg_names, arg_trace_types.components):\n placeholder_context.update_naming_scope(name)\n placeholder = trace_type_arg.placeholder_value(placeholder_context)\n func_args.append(placeholder)\n func_kwargs = {}\n for name, trace_type_kwarg in zip(*sorted(kwarg_trace_types.mapping.items())):\n placeholder_context.update_naming_scope(name)\n placeholder = trace_type_kwarg.placeholder_value(placeholder_context)\n func_kwargs[name] = placeholder\n return (tuple(func_args), func_kwargs)", + "docstring": "Create placeholders given positional args and keyword args.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", + "ast_data": "FunctionDef name:_create_placeholders arg:args arg:kwargs arg:arg_names arguments arg arg arg Assign Call Assign Call Call Assign Call Assign Call Assign Call Call If Compare Assign Call Assign For Call Call Assign Call Call Assign For Call Call Call Call Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "lookup_prefix", + "source_code": "def lookup_prefix(self, prefix, n):\n commands = [cmd for cmd in self._commands if cmd.startswith(prefix)]\n return commands[-n:]", + "docstring": "Look up the n most recent commands that starts with prefix. Args: prefix: The prefix to lookup. n: Number of most recent commands to look up. Returns: A list of n most recent commands that have the specified prefix, or all available most recent commands that have the prefix, if n exceeds the number of history commands with the prefix.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:lookup_prefix arg:self arg:prefix arg:n arguments arg arg arg Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "LogFormatterSciNotation", + "source_code": "class LogFormatterSciNotation(LogFormatterMathtext):\n\n def _non_decade_format(self, sign_string, base, fx, usetex):\n b = float(base)\n exponent = math.floor(fx)\n coeff = b ** (fx - exponent)\n if _is_close_to_int(coeff):\n coeff = round(coeff)\n return '$\\\\mathdefault{%s%g\\\\times%s^{%d}}$' % (sign_string, coeff, base, exponent)", + "docstring": "Format values following scientific notation in a logarithmic axis.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "ClassDef name:LogFormatterSciNotation FunctionDef name:_non_decade_format arg:self arg:sign_string arg:base arg:fx arg:usetex arguments arg arg arg arg arg Assign Call Assign Call Assign If Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_apply_fn", + "source_code": "def _apply_fn(dataset):\n return dataset.rejection_resample(class_func=class_func, target_dist=target_dist, initial_dist=initial_dist, seed=seed)", + "docstring": "Function from to that applies the transformation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\resampling.py", + "ast_data": "FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call" + }, + { + "library": "cryptography", + "name": "public_numbers", + "source_code": "@abc.abstractmethod\ndef public_numbers(self) -> DHPublicNumbers:\n pass", + 
"docstring": "Returns a DHPublicNumbers.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py", + "ast_data": "FunctionDef name:public_numbers arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "AlreadyExistsError", + "source_code": "@tf_export('errors.AlreadyExistsError')\nclass AlreadyExistsError(OpError):\n\n def __init__(self, node_def, op, message, *args):\n super(AlreadyExistsError, self).__init__(node_def, op, message, ALREADY_EXISTS, *args)", + "docstring": "Raised when an entity that we attempted to create already exists. An API raises this this error to avoid overwriting an existing resource, value, etc. Calling a creation API multiple times with the same arguments could raise this error if the creation API is not idempotent. For example, running an operation that saves a file (e.g. ) could potentially raise this exception if an explicit filename for an existing file was passed.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "ClassDef name:AlreadyExistsError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call" + }, + { + "library": "pytorch", + "name": "guard_size_oblivious", + "source_code": "def guard_size_oblivious(expr: Union[torch.SymBool, bool]) -> bool:\n if isinstance(expr, torch.SymBool):\n return expr.node.guard_size_oblivious('', 0)\n else:\n assert isinstance(expr, bool), expr\n return expr", + "docstring": "Perform a guard on a symbolic boolean expression in a size oblivious way. This is typically used when a non-oblivious test would result in a guard on a data dependent value of which we don't know the value of at compile time. When a guard is tested this way, we may diverge in behavior from how regular PyTorch semantics would treat it. For more information, see", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:guard_size_oblivious arg:expr arguments arg If Call Return return:yes Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "contains", + "source_code": "def contains(self, other):\n found = []\n\n def visit(expr, found=found):\n if found:\n return expr\n elif expr == other:\n found.append(1)\n return expr\n self.traverse(visit)\n return len(found) != 0", + "docstring": "Check if self contains other.", + "type": "method", + "file_path": "numpy\\numpy\\f2py\\symbolic.py", + "ast_data": "FunctionDef name:contains arg:self arg:other arguments arg arg Assign FunctionDef name:visit arg:expr arg:found arguments arg arg If Return return:yes If Compare Call Return return:yes Call Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "_create_variables_and_slots", + "source_code": "def _create_variables_and_slots(self) -> Dict[str, Dict[str, tf_variables.Variable]]:\n variables = {}\n for stacked_table_name, tables in self._stacked_table_to_tables.items():\n variables[stacked_table_name] = self._create_variables(tables, stacked_table_name=stacked_table_name)\n return variables", + "docstring": "Create variables for TPU embeddings. Returns: A dict of dicts. 
The outer dict is keyed by the table names and the inner dicts are keyed by 'parameters' and the slot variable names.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:_create_variables_and_slots arg:self arguments arg Assign For Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "ContinuousRangeField", + "source_code": "class ContinuousRangeField(RangeField):\n\n def __init__(self, *args, default_bounds=CANONICAL_RANGE_BOUNDS, **kwargs):\n if default_bounds not in ('[)', '(]', '()', '[]'):\n raise ValueError(\"default_bounds must be one of '[)', '(]', '()', or '[]'.\")\n self.default_bounds = default_bounds\n super().__init__(*args, **kwargs)\n\n def get_prep_value(self, value):\n if isinstance(value, (list, tuple)):\n return self.range_type(value[0], value[1], self.default_bounds)\n return super().get_prep_value(value)\n\n def formfield(self, **kwargs):\n kwargs.setdefault('default_bounds', self.default_bounds)\n return super().formfield(**kwargs)\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n if self.default_bounds and self.default_bounds != CANONICAL_RANGE_BOUNDS:\n kwargs['default_bounds'] = self.default_bounds\n return (name, path, args, kwargs)", + "docstring": "Continuous range field. It allows specifying default bounds for list and tuple inputs.", + "type": "class", + "file_path": "django\\django\\contrib\\postgres\\fields\\ranges.py", + "ast_data": "ClassDef name:ContinuousRangeField FunctionDef name:__init__ arg:self arguments arg arg arg arg If Compare Raise Call Assign Call Call FunctionDef name:get_prep_value arg:self arg:value arguments arg arg If Call Return return:yes Call Return return:yes Call Call FunctionDef name:formfield arg:self arguments arg arg Call Return return:yes Call Call FunctionDef name:deconstruct arg:self arguments arg Assign Call Call If BoolOp Compare Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_label", + "source_code": "def get_label(self):\n return self._label", + "docstring": "Return the label used for this artist in the legend.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:get_label arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_mutation_scale", + "source_code": "def get_mutation_scale(self):\n return self._mutation_scale", + "docstring": "Return the mutation scale. Returns ------- scalar", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_mutation_scale arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "Hartmann3", + "source_code": "class Hartmann3(Benchmark):\n\n def __init__(self, dimensions=3):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))\n self.global_optimum = [[0.11461292, 0.55564907, 0.85254697]]\n self.fglob = -3.8627821478\n self.a = asarray([[3.0, 10.0, 30.0], [0.1, 10.0, 35.0], [3.0, 10.0, 30.0], [0.1, 10.0, 35.0]])\n self.p = asarray([[0.3689, 0.117, 0.2673], [0.4699, 0.4387, 0.747], [0.1091, 0.8732, 0.5547], [0.03815, 0.5743, 0.8828]])\n self.c = asarray([1.0, 1.2, 3.0, 3.2])\n\n def fun(self, x, *args):\n self.nfev += 1\n XX = np.atleast_2d(x)\n d = sum(self.a * (XX - self.p) ** 2, axis=1)\n return -sum(self.c * exp(-d))", + "docstring": "Hartmann3 objective function. 
This class defines the Hartmann3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Hartmann3}}(x) = -\\sum\\limits_{i=1}^{4} c_i e^{-\\sum\\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2} Where, in this exercise: .. math:: \\begin{array}{l|ccc|c|ccr} \\hline i & & a_{ij}& & c_i & & p_{ij} & \\\\ \\hline 1 & 3.0 & 10.0 & 30.0 & 1.0 & 0.3689 & 0.1170 & 0.2673 \\\\ 2 & 0.1 & 10.0 & 35.0 & 1.2 & 0.4699 & 0.4387 & 0.7470 \\\\ 3 & 3.0 & 10.0 & 30.0 & 3.0 & 0.1091 & 0.8732 & 0.5547 \\\\ 4 & 0.1 & 10.0 & 35.0 & 3.2 & 0.03815 & 0.5743 & 0.8828 \\\\ \\hline \\end{array} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Jamil #62 has an incorrect coefficient. p[1, 1] should be 0.4387", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_H.py", + "ast_data": "ClassDef name:Hartmann3 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign Call Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "_new_gnu_trans", + "source_code": "def _new_gnu_trans(self, localedir, use_null_fallback=True):\n return gettext_module.translation(domain=self.domain, localedir=localedir, languages=[self.__locale], fallback=use_null_fallback)", + "docstring": "Return a mergeable gettext.GNUTranslations instance. A convenience wrapper. By default gettext uses 'fallback=False'. Using param to avoid confusion with any other references to 'fallback'.", + "type": "method", + "file_path": "django\\django\\utils\\translation\\trans_real.py", + "ast_data": "FunctionDef name:_new_gnu_trans arg:self arg:localedir arg:use_null_fallback arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_test_mode", + "source_code": "@contextmanager\ndef _test_mode() -> Generator[None, None, None]:\n global _is_test_mode\n prev = _is_test_mode\n try:\n _is_test_mode = True\n yield\n finally:\n _is_test_mode = prev", + "docstring": "Forces `` namespace to use fallback implementations. The context manager is not thread safe.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py", + "ast_data": "FunctionDef name:_test_mode arguments Assign Try Assign Assign" + }, + { + "library": "seaborn", + "name": "axes_dict", + "source_code": "@property\ndef axes_dict(self):\n return self._axes_dict", + "docstring": "A mapping of facet names to corresponding :class:. 
If only one of `` tuple.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:axes_dict arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "truncated_normal", + "source_code": "def truncated_normal(self, shape, mean, stddev, dtype):\n if self.seed:\n op = stateless_random_ops.stateless_truncated_normal\n else:\n op = random_ops.truncated_normal\n return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)", + "docstring": "A deterministic truncated normal if seed is passed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py", + "ast_data": "FunctionDef name:truncated_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call" + }, + { + "library": "django", + "name": "cycle_key", + "source_code": "def cycle_key(self):\n data = self._session\n key = self.session_key\n self.create()\n self._session_cache = data\n if key:\n self.delete(key)", + "docstring": "Create a new session key, while retaining the current session data.", + "type": "method", + "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", + "ast_data": "FunctionDef name:cycle_key arg:self arguments arg Assign Assign Call Assign If Call" + }, + { + "library": "tensorflow", + "name": "__enter__", + "source_code": "def __enter__(self) -> str:\n ctx = context.context()\n if ctx.executing_eagerly():\n old_name = ctx.scope_name\n name = self._name\n if not name:\n scope_name = ''\n elif name[-1] == '/':\n scope_name = name\n elif old_name:\n scope_name = old_name + name + '/'\n else:\n scope_name = name + '/'\n ctx.scope_name = scope_name\n\n def _restore_name_scope(*_):\n ctx.scope_name = old_name\n self._exit_fns.append(_restore_name_scope)\n else:\n scope = get_default_graph().name_scope(self._name)\n scope_name = scope.__enter__()\n self._exit_fns.append(scope.__exit__)\n return scope_name", + "docstring": "Start the scope block. 
Returns: The scope name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Assign Call If Call Assign Assign If Assign If Compare Assign If Assign Assign Assign FunctionDef name:_restore_name_scope arguments arg Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "assert_nontrivial_match", + "source_code": "def assert_nontrivial_match(self):\n return self.assert_consumed()", + "docstring": "Raises an exception if currently created objects are unmatched.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:assert_nontrivial_match arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_find_binning_thresholds", + "source_code": "def _find_binning_thresholds(col_data, max_bins):\n missing_mask = np.isnan(col_data)\n if missing_mask.any():\n col_data = col_data[~missing_mask]\n col_data = np.sort(col_data)\n distinct_values = np.unique(col_data).astype(X_DTYPE)\n if len(distinct_values) <= max_bins:\n midpoints = distinct_values[:-1] + distinct_values[1:]\n midpoints *= 0.5\n else:\n percentiles = np.linspace(0, 100, num=max_bins + 1)\n percentiles = percentiles[1:-1]\n midpoints = np.percentile(col_data, percentiles, method='midpoint').astype(X_DTYPE)\n assert midpoints.shape[0] == max_bins - 1\n np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints)\n return midpoints", + "docstring": "Extract quantiles from a continuous feature. Missing values are ignored for finding the thresholds. Parameters ---------- col_data : array-like, shape (n_samples,) The continuous feature to bin. max_bins: int The maximum number of bins to use for non-missing values. If for a given feature the number of unique values is less than ``, then those unique values will be used to compute the bin thresholds, instead of the quantiles Return ------ binning_thresholds : ndarray of shape(min(max_bins, n_unique_values) - 1,) The increasing numeric values that can be used to separate the bins. A given value x will be mapped into bin value i iff bining_thresholds[i - 1] < x <= binning_thresholds[i]", + "type": "function", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\binning.py", + "ast_data": "FunctionDef name:_find_binning_thresholds arg:col_data arg:max_bins arguments arg arg Assign Call If Call Assign Assign Call Assign Call Call If Compare Call Assign Assign Call Assign Assign Call Call Compare Call Return return:yes" + }, + { + "library": "pytorch", + "name": "validate_checkpoint_id", + "source_code": "@classmethod\n@abc.abstractmethod\ndef validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:\n ...", + "docstring": "Check if the given checkpoint_id is supported by the stroage. 
This allow us to enable automatic storage selection.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py", + "ast_data": "FunctionDef name:validate_checkpoint_id arg:cls arg:checkpoint_id arguments arg arg" + }, + { + "library": "django", + "name": "naturaltime", + "source_code": "@register.filter\ndef naturaltime(value):\n return NaturalTimeFormatter.string_for(value)", + "docstring": "For date and time values show how many seconds, minutes, or hours ago compared to current timestamp return representing string.", + "type": "function", + "file_path": "django\\django\\contrib\\humanize\\templatetags\\humanize.py", + "ast_data": "FunctionDef name:naturaltime arg:value arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_update_sparse_core_buffer_size_after_table_stacking", + "source_code": "def _update_sparse_core_buffer_size_after_table_stacking(self):\n for table_name in self._stacked_table_to_tables:\n if self._sparse_core_embedding_config.max_ids_per_table is None or table_name not in self._sparse_core_embedding_config.max_ids_per_table:\n logging.warning('Table %s is not found in max_ids_per_table provided by SparseCoreEmbeddingConfig. Using default value 256.', table_name)\n self._table_to_max_ids_per_sparse_core[table_name] = self.DEFAULT_MAX_IDS_PER_TABLE\n else:\n self._table_to_max_ids_per_sparse_core[table_name] = self._sparse_core_embedding_config.max_ids_per_table[table_name]\n if self._sparse_core_embedding_config.max_unique_ids_per_table is None or table_name not in self._sparse_core_embedding_config.max_unique_ids_per_table:\n logging.warning('Table %s is not found in max_unique_ids_per_table provided by SparseCoreEmbeddingConfig. Using default value 256.', table_name)\n self._table_to_max_unique_ids_per_sparse_core[table_name] = self.DEFAULT_MAX_UNIQUE_IDS_PER_TABLE\n else:\n self._table_to_max_unique_ids_per_sparse_core[table_name] = self._sparse_core_embedding_config.max_unique_ids_per_table[table_name]", + "docstring": "Update the sparse core buffer size after table stacking.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:_update_sparse_core_buffer_size_after_table_stacking arg:self arguments arg For If BoolOp Compare Compare Call Assign Assign If BoolOp Compare Compare Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "_unblock_model_reconstruction", + "source_code": "def _unblock_model_reconstruction(self, layer_id, layer):\n for model_id, v in self.model_layer_dependencies.items():\n _, layers = v\n if layer_id not in layers:\n continue\n layers[layers.index(layer_id)] = layer\n if all((isinstance(x, base_layer.Layer) for x in layers)):\n self._models_to_reconstruct.append(model_id)", + "docstring": "Removes layer from blocking model reconstruction.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", + "ast_data": "FunctionDef name:_unblock_model_reconstruction arg:self arg:layer_id arg:layer arguments arg arg arg For Call Assign If Compare Assign Call If Call Call Call" + }, + { + "library": "pytorch", + "name": "FlatArgsAdapter", + "source_code": "class FlatArgsAdapter(abc.ABC):\n\n @abc.abstractmethod\n def adapt(self, target_spec: pytree.TreeSpec, input_spec: pytree.TreeSpec, input_args: list[Any], metadata: Optional[dict[str, Any]]=None, obj: Optional[Any]=None) -> list[Any]:\n ...", + "docstring": "Adapts input arguments with ``.", + "type": 
"class", + "file_path": "pytorch\\torch\\export\\unflatten.py", + "ast_data": "ClassDef name:FlatArgsAdapter FunctionDef name:adapt arg:self arg:target_spec arg:input_spec arg:input_args arg:metadata arg:obj arguments arg arg arg arg arg arg" + }, + { + "library": "pytorch", + "name": "ipu", + "source_code": "def ipu(self, device: Optional[Union[int, device]]=None) -> Self:\n return self._apply(lambda t: t.ipu(device))", + "docstring": "Move all model parameters and buffers to the IPU. This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on IPU while being optimized. .. note:: This method modifies the module in-place. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:ipu arg:self arg:device arguments arg arg Return return:yes Call arguments arg Call" + }, + { + "library": "tensorflow", + "name": "_single_shard_restore", + "source_code": "def _single_shard_restore(file_prefix: tensor_lib.Tensor, shardable_tensors: Sequence[sharding_util.ShardableTensor], options: 'checkpoint_options.CheckpointOptions | None'=None) -> sharding_util.Shard:\n options = options or checkpoint_options.CheckpointOptions()\n tensor_names = []\n tensor_dtypes = []\n slice_specs = []\n for shardable_tensor in shardable_tensors:\n if shardable_tensor._tensor_save_spec:\n name = shardable_tensor._tensor_save_spec.name\n spec = shardable_tensor._tensor_save_spec.slice_spec\n else:\n name, spec = (shardable_tensor.checkpoint_key, shardable_tensor.slice_spec)\n tensor_names.append(name)\n slice_specs.append(spec)\n tensor_dtypes.append(shardable_tensor.dtype)\n restore_device = options.experimental_io_device or 'cpu:0'\n with ops.device(restore_device):\n restored_tensors = io_ops.restore_v2(file_prefix, tensor_names, slice_specs, tensor_dtypes)\n restored_tensor_dict = {}\n for shardable_tensor in shardable_tensors:\n restored_tensor = restored_tensors.pop(0)\n restored_tensor_dict.setdefault(shardable_tensor.checkpoint_key, {})[shardable_tensor.slice_spec] = restored_tensor\n return restored_tensor_dict", + "docstring": "Restore the saveable objects from a checkpoint with . Args: file_prefix: A string or scalar string Tensor containing the prefix for files to read from. shardable_tensors: A list of ShardableTensors to restore. options: Optional object. 
Returns: A restored tensor dict (maps checkpoint_key -> slice_spec -> tensor).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\functional_saver.py", + "ast_data": "FunctionDef name:_single_shard_restore arg:file_prefix arg:shardable_tensors arg:options arguments arg arg arg Assign BoolOp Call Assign Assign Assign For If Assign Assign Assign Call Call Call Assign BoolOp With Call Assign Call Assign For Assign Call Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "ENUM", + "source_code": "class ENUM:\n\n def __init__(self, *candidates: str | bool | None) -> None:\n self._candidates = frozenset(candidates)\n\n def __repr__(self) -> str:\n return f'ENUM({', '.join(sorted(map(repr, self._candidates)))})'\n\n def match(self, value: str | bool | None | Sequence[str | bool | None]) -> bool:\n if isinstance(value, str | bool | None):\n return value in self._candidates\n return all((item in self._candidates for item in value))", + "docstring": "Represents the candidates which a config value should be one of. Example: app.add_config_value('latex_show_urls', 'no', None, ENUM('no', 'footnote', 'inline'))", + "type": "class", + "file_path": "sphinx\\sphinx\\config.py", + "ast_data": "ClassDef name:ENUM FunctionDef name:__init__ arg:self arguments arg arg Assign Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call FunctionDef name:match arg:self arg:value arguments arg arg If Call Return return:yes Compare Return return:yes Call Compare" + }, + { + "library": "django", + "name": "get_list_select_related", + "source_code": "def get_list_select_related(self, request):\n return self.list_select_related", + "docstring": "Return a list of fields to add to the select_related() part of the changelist items query.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_list_select_related arg:self arg:request arguments arg arg Return return:yes" + }, + { + "library": "kornia", + "name": "batch_2x2_ellipse", + "source_code": "def batch_2x2_ellipse(m: Tensor) -> Tuple[Tensor, Tensor]:\n am = m[..., 0, 0]\n bm = m[..., 0, 1]\n cm = m[..., 1, 0]\n dm = m[..., 1, 1]\n a = am ** 2 + bm ** 2\n b = am * cm + bm * dm\n d = cm ** 2 + dm ** 2\n trh = (a + d) / 2\n sqrtdisc = torch.sqrt(((a - d) / 2) ** 2 + b ** 2)\n eigenvals = torch.stack([trh + sqrtdisc, trh - sqrtdisc], dim=-1).clamp(min=0)\n dens = eigenvals - a.unsqueeze(-1)\n dens[torch.abs(dens) < 1e-06] = 1e-06\n eigenvecs = torch.stack([b.unsqueeze(-1) / dens, torch.ones_like(dens)], dim=-2)\n eigenvecs = eigenvecs / torch.norm(eigenvecs, dim=-2, keepdim=True)\n return (eigenvals, eigenvecs)", + "docstring": "Returns Eigenvalues and Eigenvectors of batch of 2x2 matrices.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\adalam\\utils.py", + "ast_data": "FunctionDef name:batch_2x2_ellipse arg:m arguments arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Assign Call Call Assign Call Assign Compare Call Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "pygame", + "name": "array_green", + "source_code": "def array_green(surface):\n size = surface.get_size()\n array = numpy.empty(size, numpy.uint8)\n surface_to_array(array, surface, 'G')\n return array", + "docstring": "pygame.surfarray.array_green(Surface): return array copy pixel green into a 2d array Copy the pixel green values from a Surface into a 2D array. 
This will work for any type of Surface format. This function will temporarily lock the Surface as pixels are copied (see the Surface.lock - lock the Surface memory for pixel access method).", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:array_green arg:surface arguments arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "navigation_doc_metadata", + "source_code": "def navigation_doc_metadata(self, navlist: list[NavPoint]) -> dict[str, Any]:\n return {'lang': html.escape(self.config.epub_language), 'toc_locale': html.escape(self.guide_titles['toc']), 'navlist': navlist}", + "docstring": "Create a dictionary with all metadata for the nav.xhtml file properly escaped.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\epub3.py", + "ast_data": "FunctionDef name:navigation_doc_metadata arg:self arg:navlist arguments arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_check_compatible_with", + "source_code": "def _check_compatible_with(self, other: DTScalarOrNaT) -> None:\n raise AbstractMethodError(self)", + "docstring": "Verify that and are compatible. * DatetimeArray verifies that the timezones (if any) match * PeriodArray verifies that the freq matches * Timedelta has no verification In each case, NaT is considered compatible. Parameters ---------- other Raises ------ Exception", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_check_compatible_with arg:self arg:other arguments arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "_n_features_out", + "source_code": "@property\ndef _n_features_out(self):\n return self.components_.shape[0]", + "docstring": "Number of transformed output features.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py", + "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "geometry_field", + "source_code": "def geometry_field(self):\n opts = self.model._meta\n return opts.get_field(self.geom_field)", + "docstring": "Return the GeometryField instance associated with the geographic column.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py", + "ast_data": "FunctionDef name:geometry_field arg:self arguments arg Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "join", + "source_code": "@property\ndef join(self) -> timedelta:\n return self._join", + "docstring": "Get the join timeout.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py", + "ast_data": "FunctionDef name:join arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_dense_tensor", + "source_code": "def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n del weight_collections\n del trainable\n if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError('In indicator_column: {}. categorical_column must not be of type _SequenceCategoricalColumn. Suggested fix A: If you wish to use input_layer, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use sequence_input_layer instead of input_layer. 
Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n return inputs.get(self)", + "docstring": "Returns dense representing feature. Args: inputs: A object to access inputs. weight_collections: Unused since no variables are created in this function. trainable: Unused bool since no variables are created in this function. Returns: Dense created within . Raises: ValueError: If is a .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_get_dense_tensor arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg If Call Raise Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self) -> None:\n self._opset_version = _constants.TORCHLIB_OPSET\n self.functions: dict[TorchOp | str, list[OnnxDecompMeta]] = {}", + "docstring": "Initializes the registry", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_registration.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign" + }, + { + "library": "tensorflow", + "name": "_ensure_same_dataset_graph", + "source_code": "def _ensure_same_dataset_graph(dataset):\n current_graph = ops.get_default_graph()\n bfs_q = queue.Queue()\n bfs_q.put(dataset)\n visited = []\n while not bfs_q.empty():\n ds = bfs_q.get()\n visited.append(ds)\n ds_graph = ds._graph\n if current_graph != ds_graph:\n raise ValueError(f'The graph {current_graph} of the iterator is different from the graph {ds_graph} the dataset: {ds._variant_tensor} was created in. Make sure that the dataset is created in the same graph as the iterator.')\n for input_ds in ds._inputs():\n if input_ds not in visited:\n bfs_q.put(input_ds)", + "docstring": "Walks the dataset graph to ensure all datasets come from the same graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:_ensure_same_dataset_graph arg:dataset arguments arg Assign Call Assign Call Call Assign While Call Assign Call Call Assign If Compare Raise Call For Call If Compare Call" + }, + { + "library": "scikit-learn", + "name": "_chisquare", + "source_code": "def _chisquare(f_obs, f_exp):\n f_obs = np.asarray(f_obs, dtype=np.float64)\n k = len(f_obs)\n chisq = f_obs\n chisq -= f_exp\n chisq **= 2\n with np.errstate(invalid='ignore'):\n chisq /= f_exp\n chisq = chisq.sum(axis=0)\n return (chisq, special.chdtrc(k - 1, chisq))", + "docstring": "Fast replacement for scipy.stats.chisquare. Version from with additional optimizations.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_univariate_selection.py", + "ast_data": "FunctionDef name:_chisquare arg:f_obs arg:f_exp arguments arg arg Assign Call Assign Call Assign With Call Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "tril_indices_from", + "source_code": "@array_function_dispatch(_trilu_indices_form_dispatcher)\ndef tril_indices_from(arr, k=0):\n if arr.ndim != 2:\n raise ValueError('input array must be 2-d')\n return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])", + "docstring": "Return the indices for the lower-triangle of arr. See for full details. Parameters ---------- arr : array_like The indices will be valid for square arrays whose dimensions are the same as arr. k : int, optional Diagonal offset (see for details). 
Examples -------- >>> import numpy as np Create a 4 by 4 array >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) Pass the array to get the indices of the lower triangular elements. >>> trili = np.tril_indices_from(a) >>> trili (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) >>> a[trili] array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) This is syntactic sugar for tril_indices(). >>> np.tril_indices(a.shape[0]) (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) Use the parameter to return the indices for the lower triangular array up to the k-th diagonal. >>> trili1 = np.tril_indices_from(a, k=1) >>> a[trili1] array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]) See Also -------- tril_indices, tril, triu_indices_from", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_twodim_base_impl.py", + "ast_data": "FunctionDef name:tril_indices_from arg:arr arg:k arguments arg arg If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__str__", + "source_code": "def __str__(self) -> str:\n return 'while_loop(%s)' % self.name", + "docstring": "String representation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "lazy_load_stub_paste", + "source_code": "def lazy_load_stub_paste():\n global copy, paste\n copy, paste = determine_clipboard()\n return paste()", + "docstring": "A stub function for paste(), which will load the real paste() function when called so that the real paste() function is used for later calls. This allows users to import pyperclip without having determine_clipboard() automatically run, which will automatically select a clipboard mechanism. This could be a problem if it selects, say, the memory-heavy PyQt4 module but the user was just going to immediately call set_clipboard() to use a different clipboard mechanism. The lazy loading this stub function implements gives the user a chance to call set_clipboard() to pick another clipboard mechanism. Or, if the user simply calls copy() or paste() without calling set_clipboard() first, will fall back on whatever clipboard mechanism that determine_clipboard() automatically chooses.", + "type": "function", + "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py", + "ast_data": "FunctionDef name:lazy_load_stub_paste arguments Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "meshgrid", + "source_code": "@array_function_dispatch(_meshgrid_dispatcher)\ndef meshgrid(*xi, copy=True, sparse=False, indexing='xy'):\n ndim = len(xi)\n if indexing not in ['xy', 'ij']:\n raise ValueError(\"Valid values for `indexing` are 'xy' and 'ij'.\")\n s0 = (1,) * ndim\n output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) for i, x in enumerate(xi)]\n if indexing == 'xy' and ndim > 1:\n output[0].shape = (1, -1) + s0[2:]\n output[1].shape = (-1, 1) + s0[2:]\n if not sparse:\n output = np.broadcast_arrays(*output, subok=True)\n if copy:\n output = tuple((x.copy() for x in output))\n return output", + "docstring": "Return a tuple of coordinate matrices from coordinate vectors. Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. 
Parameters ---------- x1, x2,..., xn : array_like 1-D arrays representing the coordinates of a grid. indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. sparse : bool, optional If True the shape of the returned coordinate array for dimension *i* is reduced from `basics.broadcastingx1x2xnxix1x2how-to-indexmeshgridmeshgrid` is very useful to evaluate functions on a grid. If the function depends on all coordinates, both dense and sparse outputs can be used. >>> x = np.linspace(-5, 5, 101) >>> y = np.linspace(-5, 5, 101) >>> # full coordinate arrays >>> xx, yy = np.meshgrid(x, y) >>> zz = np.sqrt(xx**2 + yy**2) >>> xx.shape, yy.shape, zz.shape ((101, 101), (101, 101), (101, 101)) >>> # sparse coordinate arrays >>> xs, ys = np.meshgrid(x, y, sparse=True) >>> zs = np.sqrt(xs**2 + ys**2) >>> xs.shape, ys.shape, zs.shape ((1, 101), (101, 1), (101, 101)) >>> np.array_equal(zz, zs) True >>> h = plt.contourf(x, y, zs) >>> plt.axis('scaled') >>> plt.colorbar() >>> plt.show()", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", + "ast_data": "FunctionDef name:meshgrid arguments arg arg arg arg Assign Call If Compare Raise Call Assign Assign Call Call Call If BoolOp Compare Compare Assign Assign If Assign Call If Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "top_right", + "source_code": "@property\ndef top_right(self) -> torch.Tensor:\n out = self.top_left\n out[..., 0] += self.width\n return out", + "docstring": "The [x y] position of the top-left coordinate of the bounding box.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\face_detection.py", + "ast_data": "FunctionDef name:top_right arg:self arguments arg Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_joint_log_likelihood", + "source_code": "@abstractmethod\ndef _joint_log_likelihood(self, X):\n pass", + "docstring": "Compute the unnormalized posterior log probability of X I.e. `` for all rows x of X, as an array-like of shape (n_samples, n_classes). Public methods predict, predict_proba, predict_log_proba, and predict_joint_log_proba pass the input through _check_X before handing it over to _joint_log_likelihood. The term \"joint log likelihood\" is used interchangibly with \"joint log probability\".", + "type": "method", + "file_path": "scikit-learn\\sklearn\\naive_bayes.py", + "ast_data": "FunctionDef name:_joint_log_likelihood arg:self arg:X arguments arg arg" + }, + { + "library": "tensorflow", + "name": "_compute_dtype", + "source_code": "@property\ndef _compute_dtype(self):\n return self._dtype_policy.compute_dtype", + "docstring": "The layer's compute dtype. Unless mixed-precision is used, this is the same as . If self._autocast is True, layer's will cast floating-point inputs to this. 
Returns: The layer's compute dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:_compute_dtype arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "get_np_to_tnp_map", + "source_code": "@functools.lru_cache(maxsize=1)\ndef get_np_to_tnp_map():\n from ..utils import NP_TO_TNP_MODULE\n np_fn_to_tnp_fn = {}\n for np_mod, tnp_mod in NP_TO_TNP_MODULE.items():\n for fn_name, tnp_fn in tnp_mod.__dict__.items():\n if callable(tnp_fn):\n if (np_fn := getattr(np_mod, fn_name, None)):\n np_fn_to_tnp_fn[np_fn] = tnp_fn\n return np_fn_to_tnp_fn", + "docstring": "This generates a mapping from numpy modules to their torch._numpy modules equivalents.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py", + "ast_data": "FunctionDef name:get_np_to_tnp_map arguments Assign For Call For Call If Call If Call Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "cholesky", + "source_code": "@_apply_over_batch(('a', 2))\ndef cholesky(a, lower=False, overwrite_a=False, check_finite=True):\n c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True, check_finite=check_finite)\n return c", + "docstring": "Compute the Cholesky decomposition of a matrix. Returns the Cholesky decomposition, :math: or :math: of a Hermitian positive-definite matrix A. Parameters ---------- a : (M, M) array_like Matrix to be decomposed lower : bool, optional Whether to compute the upper- or lower-triangular Cholesky factorization. During decomposition, only the selected half of the matrix is referenced. Default is upper-triangular. overwrite_a : bool, optional Whether to overwrite data in (may improve performance). check_finite : bool, optional Whether to check that the entire input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- c : (M, M) ndarray Upper- or lower-triangular Cholesky factor of . Raises ------ LinAlgError : if decomposition fails. Notes ----- During the finiteness check (if selected), the entire matrix is checked. During decomposition, is assumed to be symmetric or Hermitian (as applicable), and only the half selected by option is referenced. Consequently, if is asymmetric/non-Hermitian, may still succeed if the symmetric/Hermitian matrix represented by the selected half is positive definite, yet it may fail if an element in the other half is non-finite. Examples -------- >>> import numpy as np >>> from scipy.linalg import cholesky >>> a = np.array([[1,-2j],[2j,5]]) >>> L = cholesky(a, lower=True) >>> L array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) >>> L @ L.T.conj() array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]])", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_decomp_cholesky.py", + "ast_data": "FunctionDef name:cholesky arg:a arg:lower arg:overwrite_a arg:check_finite arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y=None):\n return self.score_samples(X).mean()", + "docstring": "Compute the per-sample average log-likelihood of the given data X. Parameters ---------- X : array-like of shape (n_samples, n_dimensions) List of n_features-dimensional data points. Each row corresponds to a single data point. y : Ignored Not used, present for API consistency by convention. 
Returns ------- log_likelihood : float Log-likelihood of under the Gaussian mixture model.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "flatten", + "source_code": "def flatten(fields):\n flat = []\n for field in fields:\n if isinstance(field, (list, tuple)):\n flat.extend(field)\n else:\n flat.append(field)\n return flat", + "docstring": "Return a list which is a single level of flattening of the original list.", + "type": "function", + "file_path": "django\\django\\contrib\\admin\\utils.py", + "ast_data": "FunctionDef name:flatten arg:fields arguments arg Assign For If Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "register_pointwise", + "source_code": "def register_pointwise(aten_fn, name=None, broadcast=True, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, convert_input_to_bool=False, override_return_dtype=None, override_fn_when_input_bool=None, allow_alpha=False, triton_fallback=None):\n name = name or aten_fn.__name__\n fn = ops_wrapper(name)\n register_op_dtype_propagation_rules(name, type_promotion_kind, override_return_dtype)\n if override_fn_when_input_bool is not None:\n override_fn_when_input_bool = ops_wrapper(override_fn_when_input_bool)\n fn = make_pointwise(fn, override_return_dtype=override_return_dtype, override_fn_when_input_bool=override_fn_when_input_bool, allow_alpha=allow_alpha, triton_fallback=triton_fallback)\n fn = register_lowering(aten_fn, broadcast=broadcast, type_promotion_kind=type_promotion_kind, convert_input_to_bool=convert_input_to_bool)(fn)\n if hasattr(prims, name):\n register_lowering(getattr(prims, name), type_promotion_kind=None, convert_input_to_bool=convert_input_to_bool)(fn)\n return fn", + "docstring": "A pointwise function that maps ops.{name} to inputs", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\lowering.py", + "ast_data": "FunctionDef name:register_pointwise arg:aten_fn arg:name arg:broadcast arg:type_promotion_kind arg:convert_input_to_bool arg:override_return_dtype arg:override_fn_when_input_bool arg:allow_alpha arg:triton_fallback arguments arg arg arg arg arg arg arg arg arg Assign BoolOp Assign Call Call If Compare Assign Call Assign Call Assign Call Call If Call Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "get_models", + "source_code": "@functools.cache\ndef get_models(self, include_auto_created=False, include_swapped=False):\n self.check_models_ready()\n result = []\n for app_config in self.app_configs.values():\n result.extend(app_config.get_models(include_auto_created, include_swapped))\n return result", + "docstring": "Return a list of all installed models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models that have been swapped out. 
Set the corresponding keyword argument to True to include such models.", + "type": "method", + "file_path": "django\\django\\apps\\registry.py", + "ast_data": "FunctionDef name:get_models arg:self arg:include_auto_created arg:include_swapped arguments arg arg arg Call Assign For Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "will_fusion_create_cycle", + "source_code": "def will_fusion_create_cycle(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> bool:\n visited = OrderedSet[FusedSchedulerNode]()\n\n def found_path(node: BaseSchedulerNode) -> bool:\n if isinstance(node, FusedSchedulerNode) and node not in visited:\n visited.add(node)\n if node.get_operation_names().issubset(combined_ancestors):\n return False\n else:\n return bool(combined_names & node.ancestors) or any((found_path(self.name_to_fused_node[n]) for n in node.ancestors - combined_ancestors))\n return False\n combined_names = node1.get_operation_names()._dict.keys() | node2.get_operation_names()._dict.keys()\n combined_ancestors = (node1.ancestors._dict.keys() | node2.ancestors._dict.keys()) - combined_names\n cycle = any((found_path(self.name_to_fused_node[n]) for n in combined_ancestors))\n if cycle:\n WhyNoFuse(node1, node2)('will create cycle')\n return cycle", + "docstring": "Finds whether there's a path from node1 to node2 (or vice-versa) caused indirectly by other fusions.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:will_fusion_create_cycle arg:self arg:node1 arg:node2 arguments arg arg arg Assign Call FunctionDef name:found_path arg:node arguments arg If BoolOp Call Compare Call If Call Call Return return:yes Return return:yes BoolOp Call Call Call Return return:yes Assign Call Call Call Call Assign Call Call Assign Call Call If Call Call Return return:yes" + }, + { + "library": "django", + "name": "get_languages", + "source_code": "@functools.lru_cache\ndef get_languages():\n return {key.lower(): value for key, value in dict(settings.LANGUAGES).items()}", + "docstring": "Cache of settings.LANGUAGES in a dictionary for easy lookups by key. Convert keys to lowercase as they should be treated as case-insensitive.", + "type": "function", + "file_path": "django\\django\\utils\\translation\\trans_real.py", + "ast_data": "FunctionDef name:get_languages arguments Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "lookup", + "source_code": "def lookup(self, keys, name=None):\n key_tensor = keys\n if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):\n key_tensor = keys.values\n if keys.dtype.base_dtype != self._key_dtype:\n raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n with ops.name_scope(name, '%s_Lookup' % self.name, (self.resource_handle, key_tensor, self._default_value)):\n values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, key_tensor, self._default_value)\n values.set_shape(key_tensor.get_shape())\n if isinstance(keys, sparse_tensor.SparseTensor):\n return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)\n elif isinstance(keys, internal.RaggedTensor):\n return keys.with_values(values)\n else:\n return values", + "docstring": "Looks up in a table, outputs the corresponding values. The is used for keys not present in the table. Args: keys: Keys to look up. May be either a or dense . name: A name for the operation (optional). 
Returns: A if keys are sparse, a if keys are ragged, otherwise a dense . Raises: TypeError: when or doesn't match the table data types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:lookup arg:self arg:keys arg:name arguments arg arg arg Assign If Call Assign If Compare Raise Call With Call Assign Call Call Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_tick", + "source_code": "def _get_tick(self, major):\n if self._tick_class is None:\n raise NotImplementedError(f'The Axis subclass {self.__class__.__name__} must define _tick_class or reimplement _get_tick()')\n tick_kw = self._major_tick_kw if major else self._minor_tick_kw\n return self._tick_class(self.axes, 0, major=major, **tick_kw)", + "docstring": "Return the default tick instance.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:_get_tick arg:self arg:major arguments arg arg If Compare Raise Call Assign Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "def predict_proba(self, X):\n raw_predictions = self.decision_function(X)\n return self._loss.predict_proba(raw_predictions)", + "docstring": "Predict class probabilities for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to `classes_` does not support probabilities.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "Sobel", + "source_code": "class Sobel(Module):\n\n def __init__(self, normalized: bool=True, eps: float=1e-06) -> None:\n super().__init__()\n self.normalized: bool = normalized\n self.eps: float = eps\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(normalized={self.normalized})'\n\n def forward(self, input: Tensor) -> Tensor:\n return sobel(input, self.normalized, self.eps)", + "docstring": "Compute the Sobel operator and returns the magnitude per channel. Args: normalized: if True, L1 norm of the kernel is set to 1. eps: regularization number to avoid NaN during backprop. Return: the sobel edge gradient magnitudes map. 
Shape: - Input: :math: - Output: :math: Examples: >>> input = torch.rand(1, 3, 4, 4) >>> output = Sobel()(input) # 1x3x4x4", + "type": "class", + "file_path": "kornia\\kornia\\filters\\sobel.py", + "ast_data": "ClassDef name:Sobel FunctionDef name:__init__ arg:self arg:normalized arg:eps arguments arg arg arg Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "KeyValTupleParam", + "source_code": "class KeyValTupleParam(KeyValTuple):\n pass", + "docstring": "Dummy class for correctly rendering key-value tuples from parameters.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_pprint.py", + "ast_data": "ClassDef name:KeyValTupleParam" + }, + { + "library": "cherrypy", + "name": "_sanitize", + "source_code": "@classmethod\ndef _sanitize(cls, raw):\n return cls.dangerous.sub('', raw)", + "docstring": "Clean up the CR LF chars from input.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:_sanitize arg:cls arg:raw arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_fontext_synonyms", + "source_code": "def get_fontext_synonyms(fontext):\n return {'afm': ['afm'], 'otf': ['otf', 'ttc', 'ttf'], 'ttc': ['otf', 'ttc', 'ttf'], 'ttf': ['otf', 'ttc', 'ttf']}[fontext]", + "docstring": "Return a list of file extensions that are synonyms for the given file extension *fileext*.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", + "ast_data": "FunctionDef name:get_fontext_synonyms arg:fontext arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "shapes", + "source_code": "@property\ndef shapes(self):\n return self._shapes", + "docstring": "The list of shapes for each component of a staging area element.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:shapes arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "__setitem__", + "source_code": "def __setitem__(self, index, val):\n if isinstance(index, slice):\n self._set_slice(index, val)\n else:\n index = self._checkindex(index)\n self._check_allowed((val,))\n self._set_single(index, val)", + "docstring": "Set the item(s) at the specified index/slice.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py", + "ast_data": "FunctionDef name:__setitem__ arg:self arg:index arg:val arguments arg arg arg If Call Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "ShapeComputeModule", + "source_code": "class ShapeComputeModule(torch.nn.Module):\n pass", + "docstring": "Code-gen-ed module for tensor shape computation. module.prepare will mutate ser_model according to the computed operand shapes, based on the shapes of args. 
Returns a list of output templates.", + "type": "class", + "file_path": "pytorch\\torch\\backends\\_nnapi\\prepare.py", + "ast_data": "ClassDef name:ShapeComputeModule" + }, + { + "library": "django", + "name": "get_field_size", + "source_code": "def get_field_size(name):\n m = field_size_re.search(name)\n return int(m[1]) if m else None", + "docstring": "Extract the size number from a \"varchar(11)\" type name", + "type": "function", + "file_path": "django\\django\\db\\backends\\sqlite3\\introspection.py", + "ast_data": "FunctionDef name:get_field_size arg:name arguments arg Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "_get_dated_items", + "source_code": "def _get_dated_items(self, date):\n lookup_kwargs = self._make_single_date_lookup(date)\n qs = self.get_dated_queryset(**lookup_kwargs)\n return (None, qs, {'day': date, 'previous_day': self.get_previous_day(date), 'next_day': self.get_next_day(date), 'previous_month': self.get_previous_month(date), 'next_month': self.get_next_month(date)})", + "docstring": "Do the actual heavy lifting of getting the dated items; this accepts a date object so that TodayArchiveView can be trivial.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:_get_dated_items arg:self arg:date arguments arg arg Assign Call Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_help_handler", + "source_code": "def _help_handler(self, args, screen_info=None):\n _ = screen_info\n if not args:\n return self.get_help()\n elif len(args) == 1:\n return self.get_help(args[0])\n else:\n return RichTextLines(['ERROR: help takes only 0 or 1 input argument.'])", + "docstring": "Command handler for \"help\". \"help\" is a common command that merits built-in support from this class. Args: args: Command line arguments to \"help\" (not including \"help\" itself). screen_info: (dict) Information regarding the screen, e.g., the screen width in characters: {\"cols\": 80} Returns: (RichTextLines) Screen text output.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:_help_handler arg:self arg:args arg:screen_info arguments arg arg arg Assign If Return return:yes Call If Compare Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_pow1pm1", + "source_code": "def _pow1pm1(x, y):\n return np.expm1(sc.xlog1py(y, x))", + "docstring": "Compute (1 + x)**y - 1. Uses expm1 and xlog1py to avoid loss of precision when (1 + x)**y is close to 1. Note that the inverse of this function with respect to x is ``. 
That is, if t = _pow1pm1(x, y) then x = _pow1pm1(t, 1/y)", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "FunctionDef name:_pow1pm1 arg:x arg:y arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_replicate_to_shard", + "source_code": "def _replicate_to_shard(self, local_tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int, shard_index: int) -> torch.Tensor:\n num_chunks = mesh.size(mesh_dim=mesh_dim)\n shards, _ = self._split_tensor(local_tensor, num_chunks, with_padding=False, contiguous=False)\n return shards[shard_index].clone()", + "docstring": "transform from replicated tensor to a sharded tensor on the current rank, which would perform a local chunk", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py", + "ast_data": "FunctionDef name:_replicate_to_shard arg:self arg:local_tensor arg:mesh arg:mesh_dim arg:shard_index arguments arg arg arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "_apply_rel_filters", + "source_code": "def _apply_rel_filters(self, queryset):\n queryset._add_hints(instance=self.instance)\n if self._db:\n queryset = queryset.using(self._db)\n queryset._defer_next_filter = True\n return queryset._next_is_sticky().filter(**self.core_filters)", + "docstring": "Filter the queryset for the instance this manager is bound to.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py", + "ast_data": "FunctionDef name:_apply_rel_filters arg:self arg:queryset arguments arg arg Call If Assign Call Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, name, **kwargs):\n if enabled:\n self._traceme = _pywrap_traceme.TraceMe(name, **kwargs)\n else:\n self._traceme = None", + "docstring": "Creates a trace event in the profiler. Args: name: The name of the trace event. **kwargs: Keyword arguments added to the trace event. Both the key and value are of types that can be converted to strings, which will be interpreted by the profiler according to the traceme name. 
Example usage: The example above uses the keyword argument \"step_num\" to specify the training step being traced.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\trace.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:name arguments arg arg arg If Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "_generate_tracing_options", + "source_code": "def _generate_tracing_options(self, fn, scope_type):\n attributes = self._attributes.copy()\n share = self._shared_rendezvous\n if share is not None:\n attributes[attributes_lib.SHARED_RENDEZVOUS] = share\n if self._jit_compile is not None:\n attributes[attributes_lib.XLA_COMPILE] = bool(self._jit_compile)\n if self._jit_compile:\n attributes[attributes_lib.NO_INLINE] = True\n if self._autograph:\n fn = autograph_util.py_func_from_autograph(fn, self._experimental_autograph_options)\n return tracing_compilation.TracingOptions(fn, self._name, polymorphic_type=self._function_type, default_values=self._default_values, scope_type=scope_type, attributes=attributes, autograph=self._autograph, reduce_retracing=self._reduce_retracing, autograph_options=self._experimental_autograph_options, function_cache=self._function_cache, function_captures=self._function_captures, lock=self._lock)", + "docstring": "Return a TracingOptions catered to the input function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", + "ast_data": "FunctionDef name:_generate_tracing_options arg:self arg:fn arg:scope_type arguments arg arg arg Assign Call Assign If Compare Assign If Compare Assign Call If Assign If Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "tuple", + "source_code": "@property\ndef tuple(self):\n return tuple((g.tuple for g in self))", + "docstring": "Return a tuple of all the coordinates in this Geometry Collection", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\collections.py", + "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_math_fontfamily", + "source_code": "def set_math_fontfamily(self, fontfamily):\n self._fontproperties.set_math_fontfamily(fontfamily)", + "docstring": "Set the font family for math text rendered by Matplotlib. This does only affect Matplotlib's own math renderer. It has no effect when rendering with TeX (`default matplotlibrc file `. See Also -------- get_math_fontfamily", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:set_math_fontfamily arg:self arg:fontfamily arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "read_string", + "source_code": "def read_string(self, name: str) -> str:\n data = self.read_bytes(name)\n return data.decode()", + "docstring": "Read a string object from the archive. 
name: The source file inside the archive.", + "type": "method", + "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py", + "ast_data": "FunctionDef name:read_string arg:self arg:name arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "fixed_config", + "source_code": "def fixed_config(config, filename, triton_meta, inductor_meta):\n config = {**config}\n return cached_autotune(None, [triton.Config(config, **_pop_config_kwargs(config))], triton_meta=triton_meta, inductor_meta=inductor_meta, heuristic_type=HeuristicType.FIXED, filename=filename)", + "docstring": "Used when the configuration is already decided at compile time", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py", + "ast_data": "FunctionDef name:fixed_config arg:config arg:filename arg:triton_meta arg:inductor_meta arguments arg arg arg arg Assign Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "_cumulatively_sum_simpson_integrals", + "source_code": "def _cumulatively_sum_simpson_integrals(y: np.ndarray, dx: np.ndarray, integration_func: Callable[[np.ndarray, np.ndarray], np.ndarray]) -> np.ndarray:\n sub_integrals_h1 = integration_func(y, dx)\n sub_integrals_h2 = integration_func(y[..., ::-1], dx[..., ::-1])[..., ::-1]\n shape = list(sub_integrals_h1.shape)\n shape[-1] += 1\n sub_integrals = np.empty(shape)\n sub_integrals[..., :-1:2] = sub_integrals_h1[..., ::2]\n sub_integrals[..., 1::2] = sub_integrals_h2[..., ::2]\n sub_integrals[..., -1] = sub_integrals_h2[..., -1]\n res = np.cumsum(sub_integrals, axis=-1)\n return res", + "docstring": "Calculate cumulative sum of Simpson integrals. Takes as input the integration function to be used. The integration_func is assumed to return the cumulative sum using composite Simpson's rule. 
Assumes the axis of summation is -1.", + "type": "function", + "file_path": "scipy\\scipy\\integrate\\_quadrature.py", + "ast_data": "FunctionDef name:_cumulatively_sum_simpson_integrals arg:y arg:dx arg:integration_func arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_lookup_args", + "source_code": "def _lookup_args(self, args: tuple[Any, ...]) -> tuple[Any, ...]:\n return tuple((self.buffer_to_node[arg] if isinstance(arg, str) else arg.inner_expr if isinstance(arg, SymbolicCallArg) else arg for arg in args))", + "docstring": "Maps call args back to FX nodes.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py", + "ast_data": "FunctionDef name:_lookup_args arg:self arg:args arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "make_block", + "source_code": "@final\ndef make_block(self, values, placement: BlockPlacement | None=None, refs: BlockValuesRefs | None=None) -> Block:\n if placement is None:\n placement = self._mgr_locs\n if self.is_extension:\n values = ensure_block_shape(values, ndim=self.ndim)\n return new_block(values, placement=placement, ndim=self.ndim, refs=refs)", + "docstring": "Create a new block, with type inference propagate any values that are not specified", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:make_block arg:self arg:values arg:placement arg:refs arguments arg arg arg arg If Compare Assign If Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "WayburnSeader01", + "source_code": "class WayburnSeader01(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n self.custom_bounds = ([-2, 2], [-2, 2])\n self.global_optimum = [[1.0, 2.0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return (x[0] ** 6 + x[1] ** 4 - 17) ** 2 + (2 * x[0] + x[1] - 4) ** 2", + "docstring": "Wayburn and Seader 1 objective function. This class defines the Wayburn and Seader 1 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{WayburnSeader01}}(x) = (x_1^6 + x_2^4 - 17)^2 + (2x_1 + x_2 - 4)^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_W.py", + "ast_data": "ClassDef name:WayburnSeader01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "cherrypy", + "name": "decode_TEXT_maybe", + "source_code": "def decode_TEXT_maybe(value):\n return decode_TEXT(value) if '=?' in value else value", + "docstring": "Decode the text but only if '=?' 
appears in it.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:decode_TEXT_maybe arg:value arguments arg Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "reduce_euclidean_norm", + "source_code": "@tf_export('math.reduce_euclidean_norm')\n@dispatch.add_dispatch_support\ndef reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):\n keepdims = bool(keepdims)\n return _may_reduce_to_scalar(keepdims, axis, gen_math_ops.euclidean_norm(input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name))", + "docstring": "Computes the Euclidean norm of elements across dimensions of a tensor. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If is None, all dimensions are reduced, and a tensor with a single element is returned. For example: Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor, of the same dtype as the input_tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:reduce_euclidean_norm arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "kornia", + "name": "_transform_output_shape", + "source_code": "def _transform_output_shape(output: Tensor, shape: Tuple[int, ...], *, reference_shape: Optional[Tensor]=None) -> Tensor:\n out_tensor = output.clone()\n for dim in range(len(out_tensor.shape) - len(shape)):\n idx = 0\n if reference_shape is not None and out_tensor.shape[0] == reference_shape[0] != 1 and (len(shape) > 2):\n idx = 1\n if out_tensor.shape[idx] != 1:\n raise AssertionError(f'Dimension {dim} of input is expected to be 1, got {out_tensor.shape[idx]}')\n out_tensor = out_tensor.squeeze(idx)\n return out_tensor", + "docstring": "Collapse the broadcasted batch dimensions an input tensor to be the specified shape. Args: output: Tensor shape: List/tuple of int reference_shape: Tensor representation of shape to control which dimensions are collapsed. Returns: Tensor", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py", + "ast_data": "FunctionDef name:_transform_output_shape arg:output arg:shape arguments arg arg arg Assign Call For Call Call Call Assign If BoolOp Compare Compare Compare Call Assign If Compare Raise Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "default_device", + "source_code": "def default_device(self):\n return 'cpu'", + "docstring": "The default device used for new NumPy arrays. For NumPy, this always returns ``. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : str The default device used for new NumPy arrays. 
Examples -------- >>> info = np.__array_namespace_info__() >>> info.default_device() 'cpu'", + "type": "method", + "file_path": "numpy\\numpy\\_array_api_info.py", + "ast_data": "FunctionDef name:default_device arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "convert_dtypes", + "source_code": "@final\ndef convert_dtypes(self, infer_objects: bool=True, convert_string: bool=True, convert_integer: bool=True, convert_boolean: bool=True, convert_floating: bool=True, dtype_backend: DtypeBackend='numpy_nullable') -> Self:\n check_dtype_backend(dtype_backend)\n new_mgr = self._mgr.convert_dtypes(infer_objects=infer_objects, convert_string=convert_string, convert_integer=convert_integer, convert_boolean=convert_boolean, convert_floating=convert_floating, dtype_backend=dtype_backend)\n res = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes)\n return res.__finalize__(self, method='convert_dtypes')", + "docstring": "Convert columns from numpy dtypes to the best dtypes that support `convert_integerDataFrameSeriesDataFrameSeriresArrowDtypeDataFrameSeries`. >>> s.convert_dtypes() 0 a 1 b 2 dtype: string", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:convert_dtypes arg:self arg:infer_objects arg:convert_string arg:convert_integer arg:convert_boolean arg:convert_floating arg:dtype_backend arguments arg arg arg arg arg arg arg Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "adjust_brightness", + "source_code": "@tf_export('image.adjust_brightness')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef adjust_brightness(image, delta):\n with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name:\n image = ops.convert_to_tensor(image, name='image')\n orig_dtype = image.dtype\n if orig_dtype in [dtypes.float16, dtypes.float32]:\n flt_image = image\n else:\n flt_image = convert_image_dtype(image, dtypes.float32)\n adjusted = math_ops.add(flt_image, math_ops.cast(delta, flt_image.dtype), name=name)\n return convert_image_dtype(adjusted, orig_dtype, saturate=True)", + "docstring": "Adjust the brightness of RGB or Grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their brightness, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. The value is added to all components of the tensor . is converted to and scaled appropriately if it is in fixed-point representation, and is converted to the same data type. For regular images, should be in the range , as it is added to the image in floating point representation, where pixel values are in the range. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_brightness(x, delta=0.1) Args: image: RGB image or images to adjust. delta: A scalar. Amount to add to the pixel values. 
Returns: A brightness-adjusted tensor of the same shape and type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", + "ast_data": "FunctionDef name:adjust_brightness arg:image arg:delta arguments arg arg With Call Assign Call Assign If Compare Assign Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "create_partition", + "source_code": "def create_partition(self) -> Partition:\n partition_id = len(self.partitions)\n partition = Partition(partition_id)\n self.partitions.append(partition)\n return partition", + "docstring": "Create a partition and append it to self.partitions.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", + "ast_data": "FunctionDef name:create_partition arg:self arguments arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "Context", + "source_code": "class Context(BaseContext):\n\n def __init__(self, dict_=None, autoescape=True, use_l10n=None, use_tz=None):\n self.autoescape = autoescape\n self.use_l10n = use_l10n\n self.use_tz = use_tz\n self.template_name = 'unknown'\n self.render_context = RenderContext()\n self.template = None\n super().__init__(dict_)\n\n @contextmanager\n def bind_template(self, template):\n if self.template is not None:\n raise RuntimeError('Context is already bound to a template')\n self.template = template\n try:\n yield\n finally:\n self.template = None\n\n def __copy__(self):\n duplicate = super().__copy__()\n duplicate.render_context = copy(self.render_context)\n return duplicate\n\n def update(self, other_dict):\n if not hasattr(other_dict, '__getitem__'):\n raise TypeError('other_dict must be a mapping (dictionary-like) object.')\n if isinstance(other_dict, BaseContext):\n other_dict = other_dict.dicts[1:].pop()\n return ContextDict(self, other_dict)", + "docstring": "A stack container for variable context", + "type": "class", + "file_path": "django\\django\\template\\context.py", + "ast_data": "ClassDef name:Context FunctionDef name:__init__ arg:self arg:dict_ arg:autoescape arg:use_l10n arg:use_tz arguments arg arg arg arg arg Assign Assign Assign Assign Assign Call Assign Call Call FunctionDef name:bind_template arg:self arg:template arguments arg arg If Compare Raise Call Assign Try Assign FunctionDef name:__copy__ arg:self arguments arg Assign Call Call Assign Call Return return:yes FunctionDef name:update arg:self arg:other_dict arguments arg arg If Call Raise Call If Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "WORMTable", + "source_code": "class WORMTable(Table):\n table_type = 'worm'\n\n def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None):\n raise NotImplementedError('WORMTable needs to implement read')\n\n def write(self, obj, **kwargs) -> None:\n raise NotImplementedError('WORMTable needs to implement write')", + "docstring": "a write-once read-many table: this format DOES NOT ALLOW appending to a table. 
writing is a one-time operation the data are stored in a format that allows for searching the data on disk", + "type": "class", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "ClassDef name:WORMTable Assign FunctionDef name:read arg:self arg:where arg:columns arg:start arg:stop arguments arg arg arg arg arg Raise Call FunctionDef name:write arg:self arg:obj arguments arg arg arg Raise Call" + }, + { + "library": "kornia", + "name": "line_segment_transfer_error_one_way", + "source_code": "def line_segment_transfer_error_one_way(ls1: Tensor, ls2: Tensor, H: Tensor, squared: bool=False) -> Tensor:\n KORNIA_CHECK_SHAPE(H, ['B', '3', '3'])\n KORNIA_CHECK_SHAPE(ls1, ['B', 'N', '2', '2'])\n KORNIA_CHECK_SHAPE(ls2, ['B', 'N', '2', '2'])\n B, N = ls1.shape[:2]\n ps1, pe1 = torch.chunk(ls1, dim=2, chunks=2)\n ps2, pe2 = torch.chunk(ls2, dim=2, chunks=2)\n ps2_h = convert_points_to_homogeneous(ps2)\n pe2_h = convert_points_to_homogeneous(pe2)\n ln2 = ps2_h.cross(pe2_h, dim=3)\n ps1_in2 = convert_points_to_homogeneous(transform_points(H, ps1))\n pe1_in2 = convert_points_to_homogeneous(transform_points(H, pe1))\n er_st1 = (ln2 @ ps1_in2.transpose(-2, -1)).view(B, N).abs()\n er_end1 = (ln2 @ pe1_in2.transpose(-2, -1)).view(B, N).abs()\n error = 0.5 * (er_st1 + er_end1)\n if squared:\n error = error ** 2\n return error", + "docstring": "Return transfer error in image 2 for line segment correspondences given the homography matrix. Line segment end points are reprojected into image 2, and point-to-line error is calculated w.r.t. line, induced by line segment in image 2. See :cite: for details. Args: ls1: line segment correspondences from the left images with shape (B, N, 2, 2). ls2: line segment correspondences from the right images with shape (B, N, 2, 2). H: Homographies with shape :math:. squared: if True (default is False), the squared distance is returned. 
Returns: the computed distance with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\homography.py", + "ast_data": "FunctionDef name:line_segment_transfer_error_one_way arg:ls1 arg:ls2 arg:H arg:squared arguments arg arg arg arg Call Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call Call Assign Call Call Call Assign If Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "rot90", + "source_code": "@register_decomposition(aten.rot90)\n@out_wrapper()\ndef rot90(a: TensorLikeType, k: int=1, dims: DimsSequenceType=(0, 1)) -> TensorLikeType:\n if len(dims) != 2:\n raise RuntimeError(f'expected total rotation dims == 2, but got dims = {len(dims)}')\n if a.ndim < 2:\n raise RuntimeError(f'expected total dims >= 2, but got total dims = {a.ndim}')\n dims = utils.canonicalize_dims(a.ndim, dims)\n if dims[0] == dims[1]:\n raise RuntimeError(f'expected rotation dims to be different, but got dim0 = {dims[0]} and dim1 = {dims[1]}')\n k = k % 4\n if k == 1:\n return torch.transpose(torch.flip(a, (dims[1],)), dims[0], dims[1])\n elif k == 2:\n return torch.flip(a, dims)\n elif k == 3:\n return torch.transpose(torch.flip(a, (dims[0],)), dims[0], dims[1])\n else:\n return a.clone(memory_format=torch.contiguous_format)", + "docstring": "Reference implementation of :func:.", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\__init__.py", + "ast_data": "FunctionDef name:rot90 arg:a arg:k arg:dims arguments arg arg arg If Compare Call Raise Call Call If Compare Raise Call Assign Call If Compare Raise Call Assign If Compare Return return:yes Call Call If Compare Return return:yes Call If Compare Return return:yes Call Call Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n transformer_with_feature_names_out = []\n for name, trans, _ in self._iter():\n if not hasattr(trans, 'get_feature_names_out'):\n raise AttributeError('Transformer %s (type %s) does not provide get_feature_names_out.' % (str(name), type(trans).__name__))\n feature_names_out = trans.get_feature_names_out(input_features)\n transformer_with_feature_names_out.append((name, feature_names_out))\n return self._add_prefix_for_feature_names_out(transformer_with_feature_names_out)", + "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Assign For Call If Call Raise Call Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "NullKernelHandler", + "source_code": "class NullKernelHandler(NullHandler):\n\n def __init__(self):\n super().__init__()\n self.removed_buffers = OrderedSet[Any]()\n self.inplaced_to_remove = OrderedSet[Any]()\n self.index_dtype = 'tl.int64'\n\n def get_index_dtype_as_torch_dtype(self):\n import torch\n if self.index_dtype == 'tl.int64':\n return torch.int64\n elif self.index_dtype == 'tl.int32':\n return torch.int32\n else:\n raise ValueError(f'Unknown dtype: {self.index_dtype}')", + "docstring": "We need access in DeferredLine class when there is no kernel in the context. 
This happens when codegening the wrapper. Initialize and explicitly so we don't need call 'getattr' with default value which is error prone to typo in attribute name.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\virtualized.py", + "ast_data": "ClassDef name:NullKernelHandler FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Assign Call Assign FunctionDef name:get_index_dtype_as_torch_dtype arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "django", + "name": "_model_indexes_sql", + "source_code": "def _model_indexes_sql(self, model):\n if not model._meta.managed or model._meta.proxy or model._meta.swapped:\n return []\n output = []\n for field in model._meta.local_fields:\n output.extend(self._field_indexes_sql(model, field))\n for index in model._meta.indexes:\n if not index.contains_expressions or self.connection.features.supports_expression_indexes:\n output.append(index.create_sql(model, self))\n return output", + "docstring": "Return a list of all index SQL statements (field indexes, Meta.indexes) for the specified model.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\schema.py", + "ast_data": "FunctionDef name:_model_indexes_sql arg:self arg:model arguments arg arg If BoolOp Return return:no Assign For Call Call For If BoolOp Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "RgbToRaw", + "source_code": "class RgbToRaw(Module):\n ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 1, -1, -1]\n\n def __init__(self, cfa: CFA) -> None:\n super().__init__()\n self.cfa = cfa\n\n def forward(self, image: torch.Tensor) -> torch.Tensor:\n return rgb_to_raw(image, cfa=self.cfa)", + "docstring": "Module to convert a RGB image to bayer raw version of image. The image data is assumed to be in the range of (0, 1). Shape: - image: :math: - output: :math: reference: Example: >>> rgbinput = torch.rand(2, 3, 4, 6) >>> raw = RgbToRaw(CFA.GB) >>> output = raw(rgbinput) # 2x1x4x6", + "type": "class", + "file_path": "kornia\\kornia\\color\\raw.py", + "ast_data": "ClassDef name:RgbToRaw FunctionDef name:__init__ arg:self arg:cfa arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_managed_param_to_fqn", + "source_code": "def _get_managed_param_to_fqn(module_to_wrap: nn.Module, ignored_params: set[nn.Parameter], visited_modules: set[nn.Module], root_prefix: str) -> dict[nn.Parameter, str]:\n param_to_fqn: dict[nn.Parameter, str] = {}\n queue = collections.deque([(module_to_wrap, root_prefix)])\n visited_modules.add(module_to_wrap)\n while queue:\n module, prefix = queue.popleft()\n for param_name, param in module.named_parameters(recurse=False):\n if param not in ignored_params:\n fqn = param_name if prefix == '' else prefix + '.' + param_name\n param_to_fqn[param] = fqn\n for child_module_name, child_module in module.named_children():\n if child_module is None:\n continue\n if child_module not in visited_modules:\n visited_modules.add(child_module)\n child_prefix = child_module_name if prefix == '' else prefix + '.' + child_module_name\n queue.append((child_module, child_prefix))\n return param_to_fqn", + "docstring": "This returns a dict that maps managed parameter to its FQN for the given `` function meant to be called post-wrapping and on the full module tree in one shot. 
Given those differences, we do not try to unify the two.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_wrap_utils.py", + "ast_data": "FunctionDef name:_get_managed_param_to_fqn arg:module_to_wrap arg:ignored_params arg:visited_modules arg:root_prefix arguments arg arg arg arg Assign Call Call While Assign Call For Call If Compare Assign Compare Assign For Call If Compare If Compare Call Assign Compare Call Return return:yes" + }, + { + "library": "numpy", + "name": "_binary_method", + "source_code": "def _binary_method(ufunc, name):\n\n def func(self, other):\n if _disables_array_ufunc(other):\n return NotImplemented\n return ufunc(self, other)\n func.__name__ = f'__{name}__'\n return func", + "docstring": "Implement a forward binary method with a ufunc, e.g., __add__.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\mixins.py", + "ast_data": "FunctionDef name:_binary_method arg:ufunc arg:name arguments arg arg FunctionDef name:func arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "VariableSynchronization", + "source_code": "@tf_export('VariableSynchronization')\nclass VariableSynchronization(enum.Enum):\n AUTO = 0\n NONE = 1\n ON_WRITE = 2\n ON_READ = 3", + "docstring": "Indicates when a distributed variable will be synced. * : Indicates that the synchronization will be determined by the current (eg. With this would be ). * : Indicates that there will only be one copy of the variable, so there is no need to sync. * : Indicates that the variable will be updated across devices every time it is written. * : Indicates that the variable will be aggregated across devices when it is read (eg. when checkpointing or when evaluating an op that uses the variable). Example: >>> temp_grad=[tf.Variable([0.], trainable=False, ... synchronization=tf.VariableSynchronization.ON_READ, ... aggregation=tf.VariableAggregation.MEAN ... )]", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "ClassDef name:VariableSynchronization Assign Assign Assign Assign Call" + }, + { + "library": "pandas", + "name": "_any_pandas_objects", + "source_code": "def _any_pandas_objects(terms) -> bool:\n return any((isinstance(term.value, PandasObject) for term in terms))", + "docstring": "Check a sequence of terms for instances of PandasObject.", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\align.py", + "ast_data": "FunctionDef name:_any_pandas_objects arg:terms arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "boxcar", + "source_code": "def boxcar(M, sym=True, *, xp=None, device=None):\n xp = _namespace(xp)\n if _len_guards(M):\n return xp.ones(M, dtype=xp.float64, device=device)\n M, needs_trunc = _extend(M, sym)\n w = xp.ones(M, dtype=xp.float64, device=device)\n return _truncate(w, needs_trunc)", + "docstring": "Return a boxcar or rectangular window. Also known as a rectangular window or Dirichlet window, this is equivalent to no window at all. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional Whether the window is symmetric. (Has no effect for boxcar.) %(xp_device_snippet)s Returns ------- w : ndarray The window, with the maximum value normalized to 1. 
Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.boxcar(51) >>> plt.plot(window) >>> plt.title(\"Boxcar window\") >>> plt.ylabel(\"Amplitude\") >>> plt.xlabel(\"Sample\") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title(\"Frequency response of the boxcar window\") >>> plt.ylabel(\"Normalized magnitude [dB]\") >>> plt.xlabel(\"Normalized frequency [cycles per sample]\")", + "type": "function", + "file_path": "scipy\\scipy\\signal\\windows\\_windows.py", + "ast_data": "FunctionDef name:boxcar arg:M arg:sym arguments arg arg arg arg Assign Call If Call Return return:yes Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "function_callback", + "source_code": "def function_callback(self, function):\n graph_id = self._get_context_id(function.graph)\n with self._context_lock:\n self._function_to_graph_id[function] = graph_id", + "docstring": "A callback to be called on creation of ConcreteFunctions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py", + "ast_data": "FunctionDef name:function_callback arg:self arg:function arguments arg arg Assign Call With Assign" + }, + { + "library": "django", + "name": "read", + "source_code": "def read(self, wkb):\n return GEOSGeometry(super().read(wkb))", + "docstring": "Return a GEOSGeometry for the given WKB buffer.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\io.py", + "ast_data": "FunctionDef name:read arg:self arg:wkb arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "_register_pre_backward_hooks", + "source_code": "@no_type_check\ndef _register_pre_backward_hooks(state: _FSDPState, module: nn.Module, outputs: Any, handle: FlatParamHandle) -> None:\n if not torch.is_grad_enabled():\n return outputs\n if state._is_root:\n state._post_backward_callback_queued = False\n if handle:\n handle._needs_pre_backward_unshard = False\n handle._ran_pre_backward_hook = False\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n if t.requires_grad:\n t.register_hook(torch.utils.hooks.unserializable_hook(functools.partial(_pre_backward_hook, state, module, handle)))\n if handle:\n handle._needs_pre_backward_unshard = True\n return t\n return _apply_to_tensors(_register_hook, outputs)", + "docstring": "Registers pre-backward hooks on the tensors that require gradients in the forward pass outputs ``. Args: module (nn.Module): Fully sharded module (see [Note: Fully Sharded Module]). 
Returns: Forward pass outputs with pre-backward hooks registered to tensors that require gradients.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_register_pre_backward_hooks arg:state arg:module arg:outputs arg:handle arguments arg arg arg arg If Call Return return:yes If Assign If Assign Assign FunctionDef name:_register_hook arg:t arguments arg If Call Call Call If Assign Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_in_multi_worker_mode", + "source_code": "def _in_multi_worker_mode(self):\n strategy = self._distribution_strategy\n if not strategy and distribute_lib.has_strategy():\n strategy = distribute_lib.get_strategy()\n return strategy and strategy.extended._in_multi_worker_mode()", + "docstring": "Method to infer if this is working in multi-worker settings. Multi-worker training refers to the setup where the training is distributed across multiple workers, as opposed to the case where only a local process performs the training. This function is used to infer for example whether or not a distribute coordinator should be run, and thus TensorFlow servers should be started for communication with other servers in the cluster, or whether or not saving/restoring checkpoints is relevant for preemption fault tolerance. Experimental. Signature and implementation are subject to change. Returns: Whether this model indicates it's working in multi-worker settings.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Assign If BoolOp Call Assign Call Return return:yes BoolOp Call" + }, + { + "library": "pytorch", + "name": "symbolic_trace", + "source_code": "@compatibility(is_backward_compatible=True)\ndef symbolic_trace(root: Union[torch.nn.Module, Callable[..., Any]], concrete_args: Optional[dict[str, Any]]=None) -> GraphModule:\n tracer = Tracer()\n graph = tracer.trace(root, concrete_args)\n name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__\n return _make_graph_module(tracer.root, graph, name)", + "docstring": "Symbolic tracing API Given an `concrete_argsbbconcrete_argsfx.PH`.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", + "ast_data": "FunctionDef name:symbolic_trace arg:root arg:concrete_args arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "copy_to_graph_uninitialized", + "source_code": "def copy_to_graph_uninitialized(var):\n new_variable = UninitializedVariable(trainable=var.trainable, constraint=var._constraint, shape=var.shape, dtype=var.dtype, name=var._shared_name, synchronization=var.synchronization, aggregation=var.aggregation, extra_handle_data=var.handle)\n new_variable._maybe_initialize_trackable()\n return new_variable", + "docstring": "Copies an existing variable to a new graph, with no initializer.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:copy_to_graph_uninitialized arg:var arguments arg Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "aps12_f", + "source_code": "def aps12_f(x, n):\n return np.power(x, 1.0 / n) - np.power(n, 1.0 / n)", + "docstring": "nth root of x, with a zero at x=n", + "type": "function", + "file_path": 
"scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:aps12_f arg:x arg:n arguments arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "hermite", + "source_code": "def hermite(n, monic=False):\n if n < 0:\n raise ValueError('n must be nonnegative.')\n if n == 0:\n n1 = n + 1\n else:\n n1 = n\n x, w = roots_hermite(n1)\n\n def wfunc(x):\n return exp(-x * x)\n if n == 0:\n x, w = ([], [])\n hn = 2 ** n * _gam(n + 1) * sqrt(pi)\n kn = 2 ** n\n p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic, lambda x: _ufuncs.eval_hermite(n, x))\n return p", + "docstring": "Physicist's Hermite polynomial. Defined by .. math:: H_n(x) = (-1)^ne^{x^2}\\frac{d^n}{dx^n}e^{-x^2}; :math: is a polynomial of degree :math:. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- H : orthopoly1d Hermite polynomial. Notes ----- The polynomials :math: are orthogonal over :math: with weight function :math:. Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> import numpy as np >>> p_monic = special.hermite(3, monic=True) >>> p_monic poly1d([ 1. , 0. , -1.5, 0. ]) >>> p_monic(1) -0.49999999999999983 >>> x = np.linspace(-3, 3, 400) >>> y = p_monic(x) >>> plt.plot(x, y) >>> plt.title(\"Monic Hermite polynomial of degree 3\") >>> plt.xlabel(\"x\") >>> plt.ylabel(\"H_3(x)\") >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:hermite arg:n arg:monic arguments arg arg If Compare Raise Call If Compare Assign Assign Assign Call FunctionDef name:wfunc arg:x arguments arg Return return:yes Call If Compare Assign Assign Call Call Assign Assign Call arguments arg Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_SnapshotState", + "source_code": "class _SnapshotState(Enum):\n NotStarted = 0\n Restored = 1\n Iterating = 2", + "docstring": "These are the snapshotting-related states that IterDataPipes can be in. - allows you to restore a snapshot and create an iterator with reset - cannot restore again, allows you to create an iterator without resetting the DataPipe - can restore, will reset if you create a new iterator", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_hook_iterator.py", + "ast_data": "ClassDef name:_SnapshotState Assign Assign Assign" + }, + { + "library": "kornia", + "name": "transform_keypoints", + "source_code": "def transform_keypoints(self, input: Union[Tensor, Keypoints], params: List[ParamItem], extra_args: Optional[Dict[str, Any]]=None) -> Union[Tensor, Keypoints]:\n if isinstance(input, Tensor):\n batchsize, frame_num = (input.size(0), input.size(1))\n input = Keypoints(input.view(-1, input.size(2), input.size(3)))\n input = super().transform_keypoints(input, params, extra_args=extra_args)\n input = input.data.view(batchsize, frame_num, -1, 2)\n else:\n input = super().transform_keypoints(input, params, extra_args=extra_args)\n return input", + "docstring": "Transform bounding boxes. Args: input: tensor with shape :math:. If input is a type, the internal shape is :math:. params: params for the sequence. 
extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\video.py", + "ast_data": "FunctionDef name:transform_keypoints arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Call Assign Call Call Assign Call Call Call Call Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self, value, clip=None):\n if clip is None:\n clip = self.clip\n xx, is_scalar = self.process_value(value)\n mask = np.ma.getmaskarray(xx)\n xx = np.atleast_1d(xx.filled(self.vmax + 1))\n if clip:\n np.clip(xx, self.vmin, self.vmax, out=xx)\n max_col = self.Ncmap - 1\n else:\n max_col = self.Ncmap\n iret = np.digitize(xx, self.boundaries) - 1 + self._offset\n if self.Ncmap > self._n_regions:\n if self._n_regions == 1:\n iret[iret == 0] = (self.Ncmap - 1) // 2\n else:\n iret = (self.Ncmap - 1) / (self._n_regions - 1) * iret\n iret = iret.astype(np.int16)\n iret[xx < self.vmin] = -1\n iret[xx >= self.vmax] = max_col\n ret = np.ma.array(iret, mask=mask)\n if is_scalar:\n ret = int(ret[0])\n return ret", + "docstring": "This method behaves similarly to , except that it returns integers or arrays of int16.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:value arg:clip arguments arg arg arg If Compare Assign Assign Call Assign Call Assign Call Call If Call Assign Assign Assign Call If Compare If Compare Assign Compare Assign Assign Call Assign Compare Assign Compare Assign Call If Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_add_tool_cbk", + "source_code": "def _add_tool_cbk(self, event):\n if getattr(event.tool, 'cursor', None) is not None:\n self.toolmanager.toolmanager_connect(f'tool_trigger_{event.tool.name}', self._tool_trigger_cbk)", + "docstring": "Process every newly added tool.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:_add_tool_cbk arg:self arg:event arguments arg arg If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "major_minor_change", + "source_code": "def major_minor_change(old_version, new_version):\n major_mismatch = old_version.major != new_version.major\n minor_mismatch = old_version.minor != new_version.minor\n if major_mismatch or minor_mismatch:\n return True\n return False", + "docstring": "Check if a major or minor change occurred.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py", + "ast_data": "FunctionDef name:major_minor_change arg:old_version arg:new_version arguments arg arg Assign Compare Assign Compare If BoolOp Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_SummaryContextManager", + "source_code": "class _SummaryContextManager:\n\n def __init__(self, writer, step=None):\n self._writer = writer\n self._step = step\n self._old_writer = None\n self._old_step = None\n\n def __enter__(self):\n self._old_writer = _summary_state.writer\n _summary_state.writer = self._writer\n if self._step is not None:\n self._old_step = _summary_state.step\n _summary_state.step = self._step\n return self._writer\n\n def __exit__(self, *exc):\n _summary_state.writer.flush()\n _summary_state.writer = self._old_writer\n if self._step is not None:\n _summary_state.step = 
self._old_step\n return False", + "docstring": "Context manager to implement SummaryWriter.as_default().", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "ClassDef name:_SummaryContextManager FunctionDef name:__init__ arg:self arg:writer arg:step arguments arg arg arg Assign Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg Assign Assign If Compare Assign Assign Return return:yes FunctionDef name:__exit__ arg:self arguments arg arg Call Assign If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "object_graph_key_mapping", + "source_code": "def object_graph_key_mapping(checkpoint_path):\n reader = py_checkpoint_reader.NewCheckpointReader(checkpoint_path)\n object_graph_string = reader.get_tensor(trackable.OBJECT_GRAPH_PROTO_KEY)\n object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()\n object_graph_proto.ParseFromString(object_graph_string)\n names_to_keys = {}\n for node in object_graph_proto.nodes:\n for attribute in node.attributes:\n names_to_keys[attribute.full_name] = attribute.checkpoint_key\n return names_to_keys", + "docstring": "Return name to key mappings from the checkpoint. Args: checkpoint_path: string, path to object-based checkpoint Returns: Dictionary mapping tensor names to checkpoint keys.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:object_graph_key_mapping arg:checkpoint_path arguments arg Assign Call Assign Call Assign Call Call Assign For For Assign Return return:yes" + }, + { + "library": "django", + "name": "FieldGetDbPrepValueMixin", + "source_code": "class FieldGetDbPrepValueMixin:\n get_db_prep_lookup_value_is_iterable = False\n\n def get_db_prep_lookup(self, value, connection):\n field = getattr(self.lhs.output_field, 'target_field', None)\n get_db_prep_value = getattr(field, 'get_db_prep_value', None) or self.lhs.output_field.get_db_prep_value\n if not self.get_db_prep_lookup_value_is_iterable:\n value = [value]\n return ('%s', [v if hasattr(v, 'as_sql') else get_db_prep_value(v, connection, prepared=True) for v in value])", + "docstring": "Some lookups require Field.get_db_prep_value() to be called on their inputs.", + "type": "class", + "file_path": "django\\django\\db\\models\\lookups.py", + "ast_data": "ClassDef name:FieldGetDbPrepValueMixin Assign FunctionDef name:get_db_prep_lookup arg:self arg:value arg:connection arguments arg arg arg Assign Call Assign BoolOp Call If Assign Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "fourier_shift", + "source_code": "def fourier_shift(input, shift, n=-1, axis=-1, output=None):\n input = np.asarray(input)\n output = _get_output_fourier_complex(output, input)\n axis = normalize_axis_index(axis, input.ndim)\n shifts = _ni_support._normalize_sequence(shift, input.ndim)\n shifts = np.asarray(shifts, dtype=np.float64)\n if not shifts.flags.contiguous:\n shifts = shifts.copy()\n _nd_image.fourier_shift(input, shifts, n, axis, output)\n return output", + "docstring": "Multidimensional Fourier shift filter. The array is multiplied with the Fourier transform of a shift operation. Parameters ---------- input : array_like The input array. shift : float or sequence The size of the box used for filtering. If a float, is the same for all axes. If a sequence, has to contain one value for each axis. n : int, optional If is negative (default), then the input is assumed to be the result of a complex fft. 
If is larger than or equal to zero, the input is assumed to be the result of a real fft, and gives the length of the array before transformation along the real transform direction. axis : int, optional The axis of the real transform. output : ndarray, optional If given, the result of shifting the input is placed in this array. Returns ------- fourier_shift : ndarray The shifted input. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> import numpy.fft >>> fig, (ax1, ax2) = plt.subplots(1, 2) >>> plt.gray() # show the filtered result in grayscale >>> ascent = datasets.ascent() >>> input_ = numpy.fft.fft2(ascent) >>> result = ndimage.fourier_shift(input_, shift=200) >>> result = numpy.fft.ifft2(result) >>> ax1.imshow(ascent) >>> ax2.imshow(result.real) # the imaginary part is an artifact >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_fourier.py", + "ast_data": "FunctionDef name:fourier_shift arg:input arg:shift arg:n arg:axis arg:output arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call If Assign Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "_deepcopy", + "source_code": "def _deepcopy(el: Element) -> Element:\n newnode = el.copy()\n newnode.children = [child.deepcopy() for child in el.children]\n for child in newnode.children:\n child.parent = newnode\n if el.document:\n child.document = el.document\n if child.source is None:\n child.source = el.document.current_source\n if child.line is None:\n child.line = el.document.current_line\n return newnode", + "docstring": "Monkey-patch for speed.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\nodes.py", + "ast_data": "FunctionDef name:_deepcopy arg:el arguments arg Assign Call Assign Call For Assign If Assign If Compare Assign If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "write", + "source_code": "def write(self, save_path, options=None):\n return self._write(save_path, options)", + "docstring": "Save the checkpointed variables. Args: save_path: The file prefix of the checkpoint file. options: Optional CheckpointOption instance. Returns: The full path of the checkpoint file.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py", + "ast_data": "FunctionDef name:write arg:self arg:save_path arg:options arguments arg arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y, classes=None):\n if not hasattr(self, 'classes_'):\n self._more_validate_params(for_partial_fit=True)\n if self.class_weight == 'balanced':\n raise ValueError(\"class_weight 'balanced' is not supported for partial_fit. For 'balanced' weights, use `sklearn.utils.compute_class_weight` with `class_weight='balanced'`. In place of y you can use a large enough subset of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.\")\n lr = 'pa1' if self.loss == 'hinge' else 'pa2'\n return self._partial_fit(X, y, alpha=1.0, C=self.C, loss='hinge', learning_rate=lr, max_iter=1, classes=classes, sample_weight=None, coef_init=None, intercept_init=None)", + "docstring": "Fit linear model with Passive Aggressive algorithm. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of the training data. y : array-like of shape (n_samples,) Subset of the target values. classes : ndarray of shape (n_classes,) Classes across all calls to partial_fit. Can be obtained by via , where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in . Returns ------- self : object Fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_passive_aggressive.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:classes arguments arg arg arg arg If Call Call If Compare Raise Call Assign Compare Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_assert_all_paths_match", + "source_code": "def _assert_all_paths_match(values):\n paths = [_get_all_paths(st) for st in values]\n path_diff = set()\n for other_paths in paths[1:]:\n path_diff = path_diff.union(paths[0].symmetric_difference(other_paths))\n if path_diff:\n raise ValueError('Some paths are present in some, but not all, structured tensors: %r' % (path_diff,))", + "docstring": "Raises an error if the paths are not identical.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:_assert_all_paths_match arg:values arguments arg Assign Call Assign Call For Assign Call Call If Raise Call" + }, + { + "library": "scikit-learn", + "name": "_most_frequent", + "source_code": "def _most_frequent(array, extra_value, n_repeat):\n if array.size > 0:\n if array.dtype == object:\n counter = Counter(array)\n most_frequent_count = counter.most_common(1)[0][1]\n most_frequent_value = min((value for value, count in counter.items() if count == most_frequent_count))\n else:\n mode = _mode(array)\n most_frequent_value = mode[0][0]\n most_frequent_count = mode[1][0]\n else:\n most_frequent_value = 0\n most_frequent_count = 0\n if most_frequent_count == 0 and n_repeat == 0:\n return np.nan\n elif most_frequent_count < n_repeat:\n return extra_value\n elif most_frequent_count > n_repeat:\n return most_frequent_value\n elif most_frequent_count == n_repeat:\n return min(most_frequent_value, extra_value)", + "docstring": "Compute the most frequent value in a 1d array extended with [extra_value] * n_repeat, where extra_value is assumed to be not part of the array.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\impute\\_base.py", + "ast_data": "FunctionDef name:_most_frequent arg:array arg:extra_value arg:n_repeat arguments arg arg arg If Compare If Compare Assign Call Assign Call Assign Call Call Compare Assign Call Assign Assign Assign Assign If BoolOp Compare Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Call" + }, + { + "library": "authlib", + "name": "register_nonce_hooks", + "source_code": "def register_nonce_hooks(authorization_server, cache, key_prefix='nonce:', expires=86400):\n exists_nonce = create_exists_nonce_func(cache, key_prefix, expires)\n authorization_server.register_hook('exists_nonce', exists_nonce)", + "docstring": "Register nonce related hooks to authorization server. 
:param authorization_server: AuthorizationServer instance :param cache: Cache instance :param key_prefix: key prefix for temporary credential :param expires: Expire time for nonce", + "type": "function", + "file_path": "authlib\\authlib\\integrations\\flask_oauth1\\cache.py", + "ast_data": "FunctionDef name:register_nonce_hooks arg:authorization_server arg:cache arg:key_prefix arg:expires arguments arg arg arg arg Assign Call Call" + }, + { + "library": "pytorch", + "name": "InputDim", + "source_code": "@dataclass\nclass InputDim(DimSpec):\n input_dim: int", + "docstring": "Output dimension maps directly to an input dimension.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py", + "ast_data": "ClassDef name:InputDim" + }, + { + "library": "pytorch", + "name": "first_path", + "source_code": "def first_path(self, dst: str) -> list[str]:\n path = []\n while dst:\n path.append(dst)\n candidates = self._pred[dst].keys()\n dst, min_idx = ('', None)\n for candidate in candidates:\n idx = self._node_order.get(candidate, None)\n if idx is None:\n break\n if min_idx is None or idx < min_idx:\n min_idx = idx\n dst = candidate\n return list(reversed(path))", + "docstring": "Returns a list of nodes that show the first path that resulted in dst being added to the graph.", + "type": "method", + "file_path": "pytorch\\torch\\package\\_digraph.py", + "ast_data": "FunctionDef name:first_path arg:self arg:dst arguments arg arg Assign While Call Assign Call Assign For Assign Call If Compare If BoolOp Compare Compare Assign Assign Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "shear_x", + "source_code": "def shear_x(min_mag: float, max_mag: float) -> OperationBase:\n if min_mag != -max_mag:\n raise ValueError(f'{ShearX.__name__} is a symmetric operation that `- min_mag == max_mag`. 
Got [{min_mag}, {max_mag}]')\n return ShearX(None, 1.0, magnitude_range=(0.0, max_mag), symmetric_megnitude=True)", + "docstring": "Return ShearX op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py", + "ast_data": "FunctionDef name:shear_x arg:min_mag arg:max_mag arguments arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "__copy__", + "source_code": "def __copy__(self):\n newmap = self.__class__()\n for k, v in self.items():\n newmap[k] = v[:]\n return newmap", + "docstring": "Duplicate object per the copy protocol.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cprequest.py", + "ast_data": "FunctionDef name:__copy__ arg:self arguments arg Assign Call For Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "thunkify", + "source_code": "def thunkify(tracer: _ProxyTracer, f: Callable[_P, R], *args: _P.args, **kwargs: _P.kwargs) -> Thunk[R]:\n if tracer.enable_thunkify:\n return Thunk(functools.partial(f, *args, **kwargs))\n else:\n r = f(*args, **kwargs)\n return Thunk(lambda: r)", + "docstring": "Delays computation of f until it's called again Also caches the result", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py", + "ast_data": "FunctionDef name:thunkify arg:tracer arg:f arguments arg arg arg arg If Return return:yes Call Call Assign Call Return return:yes Call arguments" + }, + { + "library": "tensorflow", + "name": "_maybe_colocate_with", + "source_code": "@contextlib.contextmanager\ndef _maybe_colocate_with(self, value):\n if not self._colocate_with_first_write_call:\n yield\n else:\n if not self._colocate_with:\n self._colocate_with.append(value)\n with ops.colocate_with(self._colocate_with[0]):\n yield", + "docstring": "Colocate operations with an internal colocation group or . Args: value: , the tensor to try to colocate with. Yields: Does not yield anything, but the new context is a colocation context. If no internal colocation group is set, colocate with and set the internal colocation group to be value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:_maybe_colocate_with arg:self arg:value arguments arg arg If If Call With Call" + }, + { + "library": "pytorch", + "name": "add_leaf_node", + "source_code": "def add_leaf_node(self, leaf_node: _LeafNode) -> None:\n if self.is_same_module_as(leaf_node) or leaf_node.fx_op == 'call_module':\n self._nodes.append(leaf_node)\n elif leaf_node.fx_op == 'placeholder':\n self._nodes.append(leaf_node)\n elif self.is_parent_module_of(leaf_node):\n last_node = self._nodes[-1] if self._nodes else None\n if isinstance(last_node, _ModuleNode) and (last_node.is_parent_module_of(leaf_node) or last_node.is_same_module_as(leaf_node)):\n last_node.add_leaf_node(leaf_node)\n else:\n stack_meta = copy.deepcopy(self.stack_meta)\n stack_meta.push(leaf_node.stack_meta[len(self.stack_meta)])\n last_node = _ModuleNode(self._reference_module, stack_meta)\n self._nodes.append(last_node)\n last_node.add_leaf_node(leaf_node)\n else:\n raise AssertionError(f'Node {leaf_node} ({leaf_node.stack_meta}) does not belong to module {self._stack_meta}.')", + "docstring": "Adds a leaf node to the module. The leaf node must belong to the same or a child module. 
This method will recursively construct _ModuleNode instance based on the stack_meta information of the leaf node.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", + "ast_data": "FunctionDef name:add_leaf_node arg:self arg:leaf_node arguments arg arg If BoolOp Call Compare Call If Compare Call If Call Assign If BoolOp Call BoolOp Call Call Call Assign Call Call Call Assign Call Call Call Raise Call" + }, + { + "library": "scikit-learn", + "name": "apply", + "source_code": "def apply(self, X):\n self._check_initialized()\n X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)\n n_estimators, n_classes = self.estimators_.shape\n leaves = np.zeros((X.shape[0], n_estimators, n_classes))\n for i in range(n_estimators):\n for j in range(n_classes):\n estimator = self.estimators_[i, j]\n leaves[:, i, j] = estimator.apply(X, check_input=False)\n return leaves", + "docstring": "Apply trees in the ensemble to X, return leaf indices. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``. Returns ------- X_leaves : array-like of shape (n_samples, n_estimators, n_classes) For each datapoint x in X and for each tree in the ensemble, return the index of the leaf x ends up in each estimator. In the case of binary classification n_classes is 1.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:apply arg:self arg:X arguments arg arg Call Assign Call Assign Assign Call For Call For Call Assign Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "get_redirect_target", + "source_code": "def get_redirect_target(self, resp: requests.Response) -> str | None:\n if resp.is_redirect:\n destination = urljoin(resp.url, resp.headers['location'])\n if any((pat.match(destination) for pat in self._ignored_redirects)):\n raise _IgnoredRedirection(destination=destination, status_code=resp.status_code)\n return super().get_redirect_target(resp)", + "docstring": "Overrides the default requests.Session.get_redirect_target", + "type": "method", + "file_path": "sphinx\\sphinx\\util\\requests.py", + "ast_data": "FunctionDef name:get_redirect_target arg:self arg:resp arguments arg arg If Assign Call If Call Call Raise Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "set_gapcolor", + "source_code": "def set_gapcolor(self, gapcolor):\n if gapcolor is not None:\n mcolors._check_color_like(color=gapcolor)\n self._gapcolor = gapcolor\n self.stale = True", + "docstring": "Set a color to fill the gaps in the dashed line style. .. note:: Striped lines are created by drawing two interleaved dashed lines. There can be overlaps between those two, which may result in artifacts when using transparency. This functionality is experimental and may change. Parameters ---------- gapcolor : :mpltype: or None The color with which to fill the gaps. 
If None, the gaps are unfilled.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:set_gapcolor arg:self arg:gapcolor arguments arg arg If Compare Call Assign Assign" + }, + { + "library": "pandas", + "name": "maybe_expression", + "source_code": "def maybe_expression(s) -> bool:\n if not isinstance(s, str):\n return False\n operations = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ('=',)\n return any((op in s for op in operations))", + "docstring": "loose checking if s is a pytables-acceptable expression", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\pytables.py", + "ast_data": "FunctionDef name:maybe_expression arg:s arguments arg If Call Return return:yes Assign Return return:yes Call Compare" + }, + { + "library": "pandas", + "name": "infer_filename", + "source_code": "def infer_filename(self) -> str | None:\n if self.name is None:\n return None\n filename = Path(self.name)\n if filename.suffix == '.tar':\n return filename.with_suffix('').name\n elif filename.suffix in ('.tar.gz', '.tar.bz2', '.tar.xz'):\n return filename.with_suffix('').with_suffix('').name\n return filename.name", + "docstring": "If an explicit archive_name is not given, we still want the file inside the zip file not to be named something.tar, because that causes confusion (GH39465).", + "type": "method", + "file_path": "pandas\\pandas\\io\\common.py", + "ast_data": "FunctionDef name:infer_filename arg:self arguments arg If Compare Return return:no Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "shape_v2", + "source_code": "@dispatch.dispatch_for_api(array_ops.shape_v2)\ndef shape_v2(input: StructuredTensor, out_type=dtypes.int32, name=None) -> dynamic_ragged_shape.DynamicRaggedShape:\n del name\n return input._ragged_shape.with_dtype(out_type)", + "docstring": "Returns a DynamicRaggedShape containing the shape of the input.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:shape_v2 arg:input arg:out_type arg:name arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "combined_commuting_self_adjoint_hint", + "source_code": "def combined_commuting_self_adjoint_hint(operator_a, operator_b):\n if operator_a.is_self_adjoint and operator_b.is_self_adjoint:\n return True\n if operator_a.is_self_adjoint is True and operator_b.is_self_adjoint is False or (operator_a.is_self_adjoint is False and operator_b.is_self_adjoint is True):\n return False\n return None", + "docstring": "Get combined hint for self-adjoint-ness.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\property_hint_util.py", + "ast_data": "FunctionDef name:combined_commuting_self_adjoint_hint arg:operator_a arg:operator_b arguments arg arg If BoolOp Return return:yes If BoolOp BoolOp Compare Compare BoolOp Compare Compare Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "get_memory_growth", + "source_code": "@tf_export('config.experimental.get_memory_growth')\ndef get_memory_growth(device):\n return context.context().get_memory_growth(device)", + "docstring": "Get if memory growth is enabled for a . If memory growth is enabled for a , the runtime initialization will not allocate all memory on the device. 
For example: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... tf.config.experimental.set_memory_growth(physical_devices[0], True) ... assert tf.config.experimental.get_memory_growth(physical_devices[0]) ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: device: to query Returns: A boolean indicating the memory growth setting for the . Raises: ValueError: Invalid specified.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:get_memory_growth arg:device arguments arg Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "__call__", + "source_code": "def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):\n return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)", + "docstring": "Create a frozen matrix normal distribution. See for more information.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:mean arg:rowcov arg:colcov arg:seed arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "normalize_newlines", + "source_code": "@keep_lazy_text\ndef normalize_newlines(text):\n return re_newlines.sub('\\n', str(text))", + "docstring": "Normalize CRLF and CR newlines to just LF.", + "type": "function", + "file_path": "django\\django\\utils\\text.py", + "ast_data": "FunctionDef name:normalize_newlines arg:text arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "Instruction", + "source_code": "@dataclass_slots\n@dataclasses.dataclass\nclass Instruction:\n opcode: int\n opname: str\n arg: Optional[int]\n argval: Any\n offset: Optional[int] = None\n starts_line: Optional[int] = None\n is_jump_target: bool = False\n positions: Optional['dis.Positions'] = None\n target: Optional['Instruction'] = None\n exn_tab_entry: Optional[InstructionExnTabEntry] = None\n argrepr: Optional[str] = None\n\n def __hash__(self) -> int:\n return id(self)\n\n def __eq__(self, other) -> bool:\n return id(self) == id(other)\n\n def short_inst_repr(self) -> str:\n return f'Instruction(opname={self.opname}, offset={self.offset})'\n\n def copy_positions(self, other: 'Instruction') -> None:\n self.starts_line = other.starts_line\n self.positions = other.positions", + "docstring": "A mutable version of dis.Instruction", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "ClassDef name:Instruction FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare Call Call FunctionDef name:short_inst_repr arg:self arguments arg Return return:yes FunctionDef name:copy_positions arg:self arg:other arguments arg arg Assign Assign" + }, + { + "library": "pytorch", + "name": "BackwardCFunction", + "source_code": "class BackwardCFunction(_C._FunctionBase, FunctionCtx, _HookMixin):\n\n def apply(self, *args):\n backward_fn = self._forward_cls.backward\n vjp_fn = self._forward_cls.vjp\n if backward_fn is not Function.backward and vjp_fn is not Function.vjp:\n raise RuntimeError(\"Implementing both 'backward' and 'vjp' for a custom Function is not allowed. 
You should only implement one of them.\")\n user_fn = vjp_fn if vjp_fn is not Function.vjp else backward_fn\n return user_fn(self, *args)\n\n def apply_jvp(self, *args):\n return self._forward_cls.jvp(self, *args)\n\n def _compiled_autograd_key(self):\n return self._forward_cls._compiled_autograd_key(self)", + "docstring": "This class is used for internal autograd work. Do not use.", + "type": "class", + "file_path": "pytorch\\torch\\autograd\\function.py", + "ast_data": "ClassDef name:BackwardCFunction FunctionDef name:apply arg:self arguments arg arg Assign Assign If BoolOp Compare Compare Raise Call Assign Compare Return return:yes Call FunctionDef name:apply_jvp arg:self arguments arg arg Return return:yes Call FunctionDef name:_compiled_autograd_key arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_show_ops_in_metagraph", + "source_code": "def _show_ops_in_metagraph(saved_model_dir, tag_set):\n meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)\n _show_ops_in_metagraph_mgd(meta_graph_def)", + "docstring": "Prints the ops in the MetaGraph. Prints all the ops used in the MetaGraphDef indicated by the tag_set stored in SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py", + "ast_data": "FunctionDef name:_show_ops_in_metagraph arg:saved_model_dir arg:tag_set arguments arg arg Assign Call Call" + }, + { + "library": "pytorch", + "name": "_scaled_mm_flop", + "source_code": "@register_flop_formula(aten._scaled_mm)\ndef _scaled_mm_flop(a_shape, b_shape, scale_a_shape, scale_b_shape, bias_shape=None, scale_result_shape=None, out_dtype=None, use_fast_accum=False, out_shape=None, **kwargs) -> int:\n return mm_flop(a_shape, b_shape)", + "docstring": "Count flops for _scaled_mm.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\flop_counter.py", + "ast_data": "FunctionDef name:_scaled_mm_flop arg:a_shape arg:b_shape arg:scale_a_shape arg:scale_b_shape arg:bias_shape arg:scale_result_shape arg:out_dtype arg:use_fast_accum arg:out_shape arguments arg arg arg arg arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_set_joinable_configs", + "source_code": "def _set_joinable_configs(self) -> None:\n assert len(self._joinables) > 0\n is_first_joinable = True\n for joinable in self._joinables:\n joinable._join_config = _JoinConfig(enable=self._enable, throw_on_early_termination=self._throw_on_early_termination, is_first_joinable=is_first_joinable)\n is_first_joinable = False", + "docstring": "Set the :class: of each participating :class:.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py", + "ast_data": "FunctionDef name:_set_joinable_configs arg:self arguments arg Compare Call Assign For Assign Call Assign" + }, + { + "library": "pandas", + "name": "column_names", + "source_code": "@abstractmethod\ndef column_names(self) -> Iterable[str]:\n pass", + "docstring": "Return an iterator yielding the column names.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py", + "ast_data": "FunctionDef name:column_names arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "get_embedding_table_size", + 
"source_code": "def get_embedding_table_size(self):\n return (self.categorical_column._num_buckets, self.dimension)", + "docstring": "Returns num_ids and width.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py", + "ast_data": "FunctionDef name:get_embedding_table_size arg:self arguments arg Return return:yes" + }, + { + "library": "pygame", + "name": "draw_lines", + "source_code": "def draw_lines(surf, color, closed, points, width=1):\n return _multi_lines(surf, color, closed, points, width, aaline=False)", + "docstring": "draw several lines connected through the points.", + "type": "function", + "file_path": "pygame\\src_py\\draw_py.py", + "ast_data": "FunctionDef name:draw_lines arg:surf arg:color arg:closed arg:points arg:width arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "_create_like_index_sql", + "source_code": "def _create_like_index_sql(self, model, field):\n db_type = field.db_type(connection=self.connection)\n if db_type is not None and (field.db_index or field.unique):\n if '[' in db_type:\n return None\n collation_name = getattr(field, 'db_collation', None)\n if not collation_name and field.is_relation:\n collation_name = getattr(field.target_field, 'db_collation', None)\n if collation_name and (not self._is_collation_deterministic(collation_name)):\n return None\n if db_type.startswith('varchar'):\n return self._create_index_sql(model, fields=[field], suffix='_like', opclasses=['varchar_pattern_ops'])\n elif db_type.startswith('text'):\n return self._create_index_sql(model, fields=[field], suffix='_like', opclasses=['text_pattern_ops'])\n return None", + "docstring": "Return the statement to create an index with varchar operator pattern when the column type is 'varchar' or 'text', otherwise return None.", + "type": "method", + "file_path": "django\\django\\db\\backends\\postgresql\\schema.py", + "ast_data": "FunctionDef name:_create_like_index_sql arg:self arg:model arg:field arguments arg arg arg Assign Call If BoolOp Compare BoolOp If Compare Return return:no Assign Call If BoolOp Assign Call If BoolOp Call Return return:no If Call Return return:yes Call If Call Return return:yes Call Return return:no" + }, + { + "library": "scipy", + "name": "apply_filter", + "source_code": "def apply_filter(self, x, axis=-1, mode='constant', cval=0):\n output_len = _output_len(self._h_len_orig, x.shape[axis], self._up, self._down)\n output_shape = np.asarray(x.shape, dtype=np.int64)\n output_shape[axis] = output_len\n out = np.zeros(output_shape, dtype=self._output_type, order='C')\n axis = axis % x.ndim\n mode = _check_mode(mode)\n _apply(np.asarray(x, self._output_type), self._h_trans_flip, out, self._up, self._down, axis, mode, cval)\n return out", + "docstring": "Apply the prepared filter to the specified axis of N-D signal x.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_upfirdn.py", + "ast_data": "FunctionDef name:apply_filter arg:self arg:x arg:axis arg:mode arg:cval arguments arg arg arg arg arg Assign Call Assign Call Assign Assign Call Assign Assign Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "rand_score", + "source_code": "@validate_params({'labels_true': ['array-like'], 'labels_pred': ['array-like']}, prefer_skip_nested_validation=True)\ndef rand_score(labels_true, labels_pred):\n contingency = pair_confusion_matrix(labels_true, labels_pred)\n numerator = contingency.diagonal().sum()\n denominator = contingency.sum()\n if numerator 
== denominator or denominator == 0:\n return 1.0\n return float(numerator / denominator)", + "docstring": "Rand index. The Rand Index computes a similarity measure between two clusterings by considering all pairs of samples and counting pairs that are assigned in the same or different clusters in the predicted and true clusterings [1]_ [2]_. The raw RI score [3]_ is: .. code-block:: text RI = (number of agreeing pairs) / (number of pairs) Read more in the :ref:. Parameters ---------- labels_true : array-like of shape (n_samples,), dtype=integral Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,), dtype=integral Cluster labels to evaluate. Returns ------- RI : float Similarity score between 0.0 and 1.0, inclusive, 1.0 stands for perfect match. See Also -------- adjusted_rand_score: Adjusted Rand Score. adjusted_mutual_info_score: Adjusted Mutual Information. References ---------- .. [1] :doi:. .. [2] _ .. [3] _ Examples -------- Perfectly matching labelings have a score of 1 even >>> from sklearn.metrics.cluster import rand_score >>> rand_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Labelings that assign all classes members to the same clusters are complete but may not always be pure, hence penalized: >>> rand_score([0, 0, 1, 2], [0, 0, 1, 1]) 0.83", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_supervised.py", + "ast_data": "FunctionDef name:rand_score arg:labels_true arg:labels_pred arguments arg arg Assign Call Assign Call Call Assign Call If BoolOp Compare Compare Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "display_tpot", + "source_code": "def display_tpot():\n e2e_latency_mean = statistics.mean(latency_list)\n ttft_mean = statistics.mean(ttft_ms_list)\n generation_time_mean = e2e_latency_mean - ttft_mean\n tpot = generation_time_mean / (OUTPUT_TOKEN_LEN - 1)\n print(f'TPOT: {round(tpot, 2)} ms')", + "docstring": "Calculate the time per output token.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\flax_2b\\benchmark.py", + "ast_data": "FunctionDef name:display_tpot arguments Assign Call Assign Call Assign Assign Call Call" + }, + { + "library": "pytorch", + "name": "build", + "source_code": "@staticmethod\ndef build(rank: int, store: Store, local_addr: Optional[str], server_port: Optional[int]=None) -> 'RendezvousStoreInfo':\n if rank == 0:\n addr = local_addr or socket.getfqdn()\n port = server_port or get_free_port()\n store.set(RendezvousStoreInfo.MASTER_ADDR_KEY, addr.encode(encoding='UTF-8'))\n store.set(RendezvousStoreInfo.MASTER_PORT_KEY, str(port).encode(encoding='UTF-8'))\n addr = store.get(RendezvousStoreInfo.MASTER_ADDR_KEY).decode(encoding='UTF-8')\n port = int(store.get(RendezvousStoreInfo.MASTER_PORT_KEY).decode(encoding='UTF-8'))\n return RendezvousStoreInfo(master_addr=addr, master_port=port)", + "docstring": "Factory method, finds unused new port on rank0 host and addr/port info with all ranks. If master_addr/master_port is knowns (useful when sharing existing tcp store server) use the constructor. 
Args: rank: rank of the current node store: store to use for rendezvous local_addr: address of the current node, if not provided will be resolved from hostname server_port: port of the TCPStore server, when the TCPStore is shared.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", + "ast_data": "FunctionDef name:build arg:rank arg:store arg:local_addr arg:server_port arguments arg arg arg arg If Compare Assign BoolOp Call Assign BoolOp Call Call Call Call Call Call Assign Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "__getitem__", + "source_code": "def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series:\n mask = self.groupby_object._make_mask_from_positional_indexer(arg)\n return self.groupby_object._mask_selected_obj(mask)", + "docstring": "Select by positional index per group. Implements GroupBy._positional_selector Parameters ---------- arg : PositionalIndexer | tuple Allowed values are: - int - int valued iterable such as list or range - slice with step either None or positive - tuple of integers and slices Returns ------- Series The filtered subset of the original groupby Series. DataFrame The filtered subset of the original groupby DataFrame. See Also -------- DataFrame.iloc : Integer-location based indexing for selection by position. GroupBy.head : Return first n rows of each group. GroupBy.tail : Return last n rows of each group. GroupBy._positional_selector : Return positional selection for each group. GroupBy.nth : Take the nth row from each group if n is an int, or a subset of rows, if n is a list of ints.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\indexing.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:arg arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "index_to_string_table_from_tensor", + "source_code": "def index_to_string_table_from_tensor(vocabulary_list, default_value='UNK', name=None):\n if vocabulary_list is None:\n raise ValueError('`vocabulary_list` argument must be specified.')\n with ops.name_scope(name, 'index_to_string'):\n vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)\n num_elements = array_ops.size(vocabulary_list)\n keys = math_ops.cast(math_ops.range(num_elements), dtypes.int64)\n init = KeyValueTensorInitializer(keys, vocabulary_list, dtypes.int64, dtypes.string, name='table_init')\n return StaticHashTableV1(init, default_value)", + "docstring": "Returns a lookup table that maps a of indices into strings. This operation constructs a lookup table to map int64 indices into string values. The mapping is initialized from a string 1-D where each element is a value and the corresponding index within the tensor is the key. Any input which does not have a corresponding index in 'vocabulary_list' (an out-of-vocabulary entry) is assigned the The underlying table must be initialized by calling or once. Elements in cannot have duplicates, otherwise when executing the table initializer op, it will throw a . Sample Usages: Args: vocabulary_list: A 1-D string that specifies the strings to map from indices. default_value: The value to use for out-of-vocabulary indices. name: A name for this op (optional). Returns: The lookup table to map a string values associated to a given index . 
Raises: ValueError: when is not set.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:index_to_string_table_from_tensor arg:vocabulary_list arg:default_value arg:name arguments arg arg arg If Compare Raise Call With Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "trimmed_std", + "source_code": "def trimmed_std(a, limits=(0.1, 0.1), inclusive=(1, 1), relative=True, axis=None, ddof=0):\n if not isinstance(limits, tuple) and isinstance(limits, float):\n limits = (limits, limits)\n if relative:\n out = trimr(a, limits=limits, inclusive=inclusive, axis=axis)\n else:\n out = trima(a, limits=limits, inclusive=inclusive)\n return out.std(axis=axis, ddof=ddof)", + "docstring": "Returns the trimmed standard deviation of the data along the given axis. %s ddof : {0,integer}, optional Means Delta Degrees of Freedom. The denominator used during computations is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- biased estimate of the variance.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:trimmed_std arg:a arg:limits arg:inclusive arg:relative arg:axis arg:ddof arguments arg arg arg arg arg arg If BoolOp Call Call Assign If Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_group_key", + "source_code": "def get_group_key(self, devices):\n with self._lock:\n devices_key = ','.join(devices)\n if devices_key not in self._known_groups:\n self._known_groups[devices_key] = self._get_new_group_key(devices)\n return self._known_groups[devices_key]", + "docstring": "Returns a group key for the list of local devices. The same group key is returned if the list of local devices is the same. Args: devices: a list of local canonical device strings in a collective group. Returns: a group key.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py", + "ast_data": "FunctionDef name:get_group_key arg:self arg:devices arguments arg arg With Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "ensure_graph_is_valid", + "source_code": "def ensure_graph_is_valid(graph_def: graph_pb2.GraphDef) -> None:\n node_map = {}\n for node in graph_def.node:\n if node.name not in node_map:\n node_map[node.name] = node\n else:\n raise ValueError('Duplicate node names detected for ', node.name)\n for node in graph_def.node:\n for input_name in node.input:\n input_node_name = node_name_from_input(input_name)\n if input_node_name not in node_map:\n raise ValueError('Input for ', node.name, ' not found: ', input_name)", + "docstring": "Makes sure that the graph is internally consistent. Checks basic properties of the graph def and raises an exception if there are input references to missing nodes, duplicated names, or other logic errors. Args: graph_def: Definition of a graph to be checked. 
Raises: ValueError: If the graph is incorrectly constructed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py", + "ast_data": "FunctionDef name:ensure_graph_is_valid arg:graph_def arguments arg Assign For If Compare Assign Raise Call For For Assign Call If Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "load", + "source_code": "def load(self, sess, tags, import_scope=None, **saver_kwargs):\n saved_model_proto = parse_saved_model(self._export_dir)\n metrics.IncrementReadApi(_LOADER_LABEL)\n with sess.graph.as_default():\n saver, _ = self.load_graph(sess.graph, tags, import_scope, **saver_kwargs)\n self.restore_variables(sess, saver, import_scope)\n self.run_init_ops(sess, tags, import_scope)\n meta_graph_def = self.get_meta_graph_def_from_tags(tags)\n if len(saved_model_proto.meta_graphs) == 1 and saved_model_proto.meta_graphs[0].HasField('object_graph_def'):\n metrics.IncrementRead(write_version='2')\n else:\n metrics.IncrementRead(write_version='1')\n return meta_graph_def", + "docstring": "Load the MetaGraphDef graph and restore variable values into the session. Args: sess: tf.compat.v1.Session to restore variable values. tags: a set of string tags identifying a MetaGraphDef. import_scope: Optional -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static protocol buffer that is returned. **saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph. Returns: proto of the graph that was loaded.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py", + "ast_data": "FunctionDef name:load arg:self arg:sess arg:tags arg:import_scope arguments arg arg arg arg arg Assign Call Call With Call Assign Call Call Call Assign Call If BoolOp Compare Call Call Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "query_token", + "source_code": "def query_token(self, token_string, token_type_hint):\n raise NotImplementedError()", + "docstring": "Get the token from database/storage by the given token string. 
Developers should implement this method:: def query_token(self, token_string, token_type_hint): if token_type_hint == \"access_token\": tok = Token.query_by_access_token(token_string) elif token_type_hint == \"refresh_token\": tok = Token.query_by_refresh_token(token_string) else: tok = Token.query_by_access_token(token_string) if not tok: tok = Token.query_by_refresh_token(token_string) return tok", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7662\\introspection.py", + "ast_data": "FunctionDef name:query_token arg:self arg:token_string arg:token_type_hint arguments arg arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "node_supports_equalization", + "source_code": "def node_supports_equalization(node: Node, modules) -> bool:\n if node.op == 'call_module':\n return nn_module_supports_equalization(modules[str(node.target)]) or fused_module_supports_equalization(modules[str(node.target)]) or custom_module_supports_equalization(modules[str(node.target)])\n elif node.op == 'call_function':\n return node.target in [F.linear, F.conv1d, F.conv2d, F.conv3d]\n return False", + "docstring": "Checks if the current node supports equalization Currently we only support nn.Linear/F.Linear and nn.Conv/F.conv layers", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:node_supports_equalization arg:node arg:modules arguments arg arg If Compare Return return:yes BoolOp Call Call Call Call Call Call If Compare Return return:yes Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "is_frozen_param", + "source_code": "def is_frozen_param(t: torch.Tensor) -> bool:\n return getattr(t, '_is_frozen_param', False)", + "docstring": "Return True if the tensor is a frozen param.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\freezing_utils.py", + "ast_data": "FunctionDef name:is_frozen_param arg:t arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "sparse_slice", + "source_code": "@tf_export('sparse.slice', v1=['sparse.slice', 'sparse_slice'])\n@deprecation.deprecated_endpoints('sparse_slice')\ndef sparse_slice(sp_input, start, size, name=None):\n sp_input = _convert_to_sparse_tensor(sp_input)\n start = ops.convert_to_tensor(start, dtypes.int64)\n size = ops.convert_to_tensor(size, dtypes.int64)\n with ops.name_scope(name, 'SparseSlice', [sp_input]) as name:\n output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(sp_input.indices, sp_input.values, sp_input.dense_shape, start, size, name=name)\n return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)", + "docstring": "Slice a based on the and . For example, if the input is input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: sparse.slice([0, 0], [2, 4]) = shape = [2, 4] [ a ] [b c ] sparse.slice([0, 4], [2, 3]) = shape = [2, 3] [ d e ] [ ] Args: sp_input: The to split. start: 1-D. tensor represents the start of the slice. size: 1-D. tensor represents the size of the slice. name: A name for the operation (optional). Returns: A objects resulting from splicing. 
Raises: TypeError: If is not a .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:sparse_slice arg:sp_input arg:start arg:size arg:name arguments arg arg arg arg Assign Call Assign Call Assign Call With Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "deregister_context", + "source_code": "def deregister_context(self, context_words):\n for context_word in context_words:\n if context_word not in self._comp_dict:\n raise KeyError('Cannot deregister unregistered context word \"%s\"' % context_word)\n for context_word in context_words:\n del self._comp_dict[context_word]", + "docstring": "Deregister a list of context words. Args: context_words: A list of context words to deregister, as a list of str. Raises: KeyError: if there are word(s) in context_words that do not correspond to any registered contexts.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:deregister_context arg:self arg:context_words arguments arg arg For If Compare Raise Call For" + }, + { + "library": "tensorflow", + "name": "isnamedtuple", + "source_code": "def isnamedtuple(f):\n if not (tf_inspect.isclass(f) and issubclass(f, tuple)):\n return False\n if not hasattr(f, '_fields'):\n return False\n fields = getattr(f, '_fields')\n if not isinstance(fields, tuple):\n return False\n if not all((isinstance(f, str) for f in fields)):\n return False\n return True", + "docstring": "Returns True if the argument is a namedtuple-like.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py", + "ast_data": "FunctionDef name:isnamedtuple arg:f arguments arg If BoolOp Call Call Return return:yes If Call Return return:yes Assign Call If Call Return return:yes If Call Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "from_custom_template", + "source_code": "@classmethod\ndef from_custom_template(cls, searchpath: Sequence[str], html_table: str | None=None, html_style: str | None=None) -> type[Styler]:\n loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(searchpath), cls.loader])\n\n class MyStyler(cls):\n env = jinja2.Environment(loader=loader)\n if html_table:\n template_html_table = env.get_template(html_table)\n if html_style:\n template_html_style = env.get_template(html_style)\n return MyStyler", + "docstring": "Factory function for creating a subclass of `Table Visualization `_ for more examples.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style.py", + "ast_data": "FunctionDef name:from_custom_template arg:cls arg:searchpath arg:html_table arg:html_style arguments arg arg arg arg Assign Call Call ClassDef name:MyStyler Assign Call If Assign Call If Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_input_shape_at", + "source_code": "def get_input_shape_at(self, node_index):\n return self._get_node_attribute_at_index(node_index, 'input_shapes', 'input shape')", + "docstring": "Retrieves the input shape(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple inputs). 
Raises: RuntimeError: If called in Eager mode.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:get_input_shape_at arg:self arg:node_index arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "eager_handle_data", + "source_code": "@property\ndef eager_handle_data(self):\n return _get_handle_data(self._matrix) if self._eager_mode else None", + "docstring": "Return the matrix's handle data iff in eager mode.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py", + "ast_data": "FunctionDef name:eager_handle_data arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_CVObjects", + "source_code": "class _CVObjects(_Constraint):\n\n def __init__(self):\n super().__init__()\n self._constraints = [Interval(Integral, 2, None, closed='left'), HasMethods(['split', 'get_n_splits']), _IterablesNotString(), _NoneConstraint()]\n\n def is_satisfied_by(self, val):\n return any((c.is_satisfied_by(val) for c in self._constraints))\n\n def __str__(self):\n return f'{', '.join([str(c) for c in self._constraints[:-1]])} or {self._constraints[-1]}'", + "docstring": "Constraint representing cv objects. Convenient class for [ Interval(Integral, 2, None, closed=\"left\"), HasMethods([\"split\", \"get_n_splits\"]), _IterablesNotString(), None, ]", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py", + "ast_data": "ClassDef name:_CVObjects FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Call Call Call FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "set_anncoords", + "source_code": "def set_anncoords(self, coords):\n self._textcoords = coords", + "docstring": "Set the coordinate system to use for . See also *xycoords* in .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:set_anncoords arg:self arg:coords arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "TensorBoardVersionSelector", + "source_code": "class TensorBoardVersionSelector(object):\n\n def __new__(cls, *args, **kwargs):\n use_v2 = should_use_v2()\n start_cls = cls\n cls = swap_class(start_cls, callbacks.TensorBoard, callbacks_v1.TensorBoard, use_v2)\n if start_cls == callbacks_v1.TensorBoard and cls == callbacks.TensorBoard:\n return cls(*args, **kwargs)\n return super(TensorBoardVersionSelector, cls).__new__(cls)", + "docstring": "Chooses between Keras v1 and v2 TensorBoard callback class.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\version_utils.py", + "ast_data": "ClassDef name:TensorBoardVersionSelector FunctionDef name:__new__ arg:cls arguments arg arg arg Assign Call Assign Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "ready_size", + "source_code": "def ready_size(self, name=None):\n if name is None:\n name = '%s_BarrierReadySize' % self._name\n return gen_data_flow_ops.barrier_ready_size(self._barrier_ref, name=name)", + "docstring": "Compute the number of complete elements in the given barrier. Args: name: A name for the operation (optional). 
Returns: A single-element tensor containing the number of complete elements in the given barrier.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:ready_size arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "_matmat", + "source_code": "def _matmat(self, X):\n return np.hstack([self.matvec(col.reshape(-1, 1)) for col in X.T])", + "docstring": "Default matrix-matrix multiplication handler. Falls back on the user-defined _matvec method, so defining that will define matrix multiplication (though in a very suboptimal way).", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py", + "ast_data": "FunctionDef name:_matmat arg:self arg:X arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "PositiveDefiniteTransform", + "source_code": "class PositiveDefiniteTransform(Transform):\n domain = constraints.independent(constraints.real, 2)\n codomain = constraints.positive_definite\n\n def __eq__(self, other):\n return isinstance(other, PositiveDefiniteTransform)\n\n def _call(self, x):\n x = LowerCholeskyTransform()(x)\n return x @ x.mT\n\n def _inverse(self, y):\n y = torch.linalg.cholesky(y)\n return LowerCholeskyTransform().inv(y)", + "docstring": "Transform from unconstrained matrices to positive-definite matrices.", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\transforms.py", + "ast_data": "ClassDef name:PositiveDefiniteTransform Assign Call Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Assign Call Call Return return:yes FunctionDef name:_inverse arg:self arg:y arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_add_sparse_feature", + "source_code": "def _add_sparse_feature(self, key, feature):\n if not feature.index_key:\n raise ValueError(f'Missing index_key for SparseFeature {feature}.')\n if not feature.value_key:\n raise ValueError(f'Missing value_key for SparseFeature {feature}.')\n if not feature.dtype:\n raise ValueError(f'Missing type for feature {key}. 
Received feature={feature}.')\n index_keys = feature.index_key\n if isinstance(index_keys, str):\n index_keys = [index_keys]\n elif len(index_keys) > 1:\n tf_logging.warning('SparseFeature is a complicated feature config and should only be used after careful consideration of VarLenFeature.')\n for index_key in sorted(index_keys):\n self._add_sparse_key(index_key, dtypes.int64)\n self._add_sparse_key(feature.value_key, feature.dtype)", + "docstring": "Adds a SparseFeature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py", + "ast_data": "FunctionDef name:_add_sparse_feature arg:self arg:key arg:feature arguments arg arg arg If Raise Call If Raise Call If Raise Call Assign If Call Assign If Compare Call Call For Call Call Call" + }, + { + "library": "django", + "name": "__rmul__", + "source_code": "def __rmul__(self, n):\n return self.__class__(list(self) * n)", + "docstring": "multiply", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py", + "ast_data": "FunctionDef name:__rmul__ arg:self arg:n arguments arg arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "__pow__", + "source_code": "def __pow__(self, t: float) -> 'Quaternion':\n theta = self.polar_angle[..., None]\n vec_norm = self.vec.norm(dim=-1, keepdim=True)\n n = where(vec_norm != 0, self.vec / vec_norm, self.vec * 0)\n w = (t * theta).cos()\n xyz = (t * theta).sin() * n\n return Quaternion(concatenate((w, xyz), -1))", + "docstring": "Return the power of a quaternion raised to exponent t. Args: t: raised exponent. Example: >>> q = Quaternion(tensor([1., .5, 0., 0.])) >>> q_pow = q**2", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\quaternion.py", + "ast_data": "FunctionDef name:__pow__ arg:self arg:t arguments arg arg Assign Assign Call Assign Call Compare Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "desc_name", + "source_code": "class desc_name(_desc_classes_injector, nodes.Part, nodes.Inline, nodes.FixedTextElement):\n classes = ['sig-name', 'descname']", + "docstring": "Node for the main object name. 
For example, in the declaration of a Python class ``.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:desc_name Assign" + }, + { + "library": "tensorflow", + "name": "_gen_gradient_func", + "source_code": "def _gen_gradient_func(func):\n\n def gradient_func(unused_op, *result_grads):\n\n def none_to_zero(x, t):\n if x is not None:\n return x\n shape, dtype = default_gradient.shape_and_dtype(t)\n if shape.is_fully_defined():\n return default_gradient.zeros_like(t)\n dims = []\n if shape.rank is not None:\n dims = [1 if d is None else d for d in shape.as_list()]\n return array_ops.zeros(dims, dtype)\n result_grads = [none_to_zero(x, t) for x, t in zip(result_grads, func.graph.inputs)]\n return func(*result_grads)\n return gradient_func", + "docstring": "Wraps a deserialized function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py", + "ast_data": "FunctionDef name:_gen_gradient_func arg:func arguments arg FunctionDef name:gradient_func arg:unused_op arguments arg arg FunctionDef name:none_to_zero arg:x arg:t arguments arg arg If Compare Return return:yes Assign Call If Call Return return:yes Call Assign If Compare Assign Compare Call Return return:yes Call Assign Call Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_convert_object_or_list", + "source_code": "def _convert_object_or_list(nested):\n if wrap:\n if isinstance(nested, ListWrapper):\n return nested\n if _is_serialized_node_data(nested):\n return ListWrapper(nested)\n return nested\n else:\n if isinstance(nested, ListWrapper):\n return nested.as_list()\n return nested", + "docstring": "Convert b/t object and list representations.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py", + "ast_data": "FunctionDef name:_convert_object_or_list arg:nested arguments arg If If Call Return return:yes If Call Return return:yes Call Return return:yes If Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "release", + "source_code": "def release(self):\n if self.fd is not None:\n os.close(self.fd)\n os.remove(self.lock_file_path)", + "docstring": "Release the baton and removes its file.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\file_baton.py", + "ast_data": "FunctionDef name:release arg:self arguments arg If Compare Call Call" + }, + { + "library": "matplotlib", + "name": "font_path", + "source_code": "@property\ndef font_path(self):\n psfont = self._get_pdftexmap_entry()\n if psfont.filename is None:\n raise ValueError('No usable font file found for {} ({}); the font may lack a Type-1 version'.format(psfont.psname.decode('ascii'), psfont.texname.decode('ascii')))\n return Path(psfont.filename)", + "docstring": "The to the font for this glyph.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\dviread.py", + "ast_data": "FunctionDef name:font_path arg:self arguments arg Assign Call If Compare Raise Call Call Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "eta", + "source_code": "def eta(lam):\n if lam > 0:\n return mp.sqrt(2 * (lam - mp.log(lam + 1)))\n elif lam < 0:\n return -mp.sqrt(2 * (lam - mp.log(lam + 1)))\n else:\n return 0", + "docstring": "Function from DLMF 8.12.1 shifted to be centered at 0.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_precompute\\gammainc_asy.py", + "ast_data": "FunctionDef name:eta arg:lam 
arguments arg If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_convert_datetime_to_stata_type", + "source_code": "def _convert_datetime_to_stata_type(fmt: str) -> np.dtype:\n if fmt in ['tc', '%tc', 'td', '%td', 'tw', '%tw', 'tm', '%tm', 'tq', '%tq', 'th', '%th', 'ty', '%ty']:\n return np.dtype(np.float64)\n else:\n raise NotImplementedError(f'Format {fmt} not implemented')", + "docstring": "Convert from one of the stata date formats to a type in TYPE_MAP.", + "type": "function", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:_convert_datetime_to_stata_type arg:fmt arguments arg If Compare Return return:yes Call Raise Call" + }, + { + "library": "django", + "name": "BaseListView", + "source_code": "class BaseListView(MultipleObjectMixin, View):\n\n def get(self, request, *args, **kwargs):\n self.object_list = self.get_queryset()\n allow_empty = self.get_allow_empty()\n if not allow_empty:\n if self.get_paginate_by(self.object_list) is not None and hasattr(self.object_list, 'exists'):\n is_empty = not self.object_list.exists()\n else:\n is_empty = not self.object_list\n if is_empty:\n raise Http404(_('Empty list and “%(class_name)s.allow_empty” is False.') % {'class_name': self.__class__.__name__})\n context = self.get_context_data()\n return self.render_to_response(context)", + "docstring": "Base view for displaying a list of objects. This requires subclassing to provide a response mixin.", + "type": "class", + "file_path": "django\\django\\views\\generic\\list.py", + "ast_data": "ClassDef name:BaseListView FunctionDef name:get arg:self arg:request arguments arg arg arg arg Assign Call Assign Call If If BoolOp Compare Call Call Assign Call Assign If Raise Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_watch", + "source_code": "def _watch(self, primals, tangents):\n\n def _watch(primal, tangent):\n if not primal.dtype.is_floating:\n logging.log_first_n(logging.WARN, 'The dtype of the watched primal must be floating (e.g. tf.float32), got %r', 5, primal.dtype)\n tangent = ops.convert_to_tensor(tangent, dtype=primal.dtype)\n if hasattr(primal, 'handle'):\n primal = ops.convert_to_tensor(primal.handle)\n pywrap_tfe.TFE_Py_ForwardAccumulatorWatch(self._accumulator, primal, tangent)\n nest.map_structure(_watch, primals, tangents)", + "docstring": "Ensures that are being traced by this accumulator. Mathematically, is a vector right-multiplying the Jacobian matrix (a Jacobian-vector product) for the function computed while this accumulator is active. Since JVPs are computed in forward mode as the computation happens, this vector must be supplied in advance. Watching a single tensor multiple times sums each of its . Any un-watched tensor has zeros for its tangent vector. Args: primals: A Tensor or list of Tensors. tangents: A Tensor or list of Tensors matching .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop.py", + "ast_data": "FunctionDef name:_watch arg:self arg:primals arg:tangents arguments arg arg arg FunctionDef name:_watch arg:primal arg:tangent arguments arg arg If Call Assign Call If Call Assign Call Call Call" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y, sample_weight=None):\n return super().score(X, y, sample_weight)", + "docstring": "Return the mean accuracy on the given test data and labels. 
In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like of shape (n_samples, n_features), or None Test samples. If , predictions for all indexed points are used; in this case, points are not considered their own neighbors. This means that implicitly performs a leave-one-out cross-validation procedure and is equivalent to but typically much faster. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for . sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of `y`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_classification.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "Similarity", + "source_code": "class Similarity(BaseModel):\n\n def __init__(self, rotation: bool=True, scale: bool=True, shift: bool=True) -> None:\n super().__init__()\n if rotation:\n self.rot = nn.Parameter(torch.zeros(1))\n else:\n self.register_buffer('rot', torch.zeros(1))\n if shift:\n self.shift = nn.Parameter(torch.zeros(1, 2, 1))\n else:\n self.register_buffer('shift', torch.zeros(1, 2, 1))\n if scale:\n self.scale = nn.Parameter(torch.ones(1))\n else:\n self.register_buffer('scale', torch.ones(1))\n self.reset_model()\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(angle = {self.rot}, \\n shift={self.shift}, \\n scale={self.scale})'\n\n def reset_model(self) -> None:\n torch.nn.init.zeros_(self.rot)\n torch.nn.init.zeros_(self.shift)\n torch.nn.init.ones_(self.scale)\n\n def forward(self) -> Tensor:\n rot = self.scale * angle_to_rotation_matrix(self.rot)\n out = convert_affinematrix_to_homography(torch.cat([rot, self.shift], dim=2))\n return out\n\n def forward_inverse(self) -> Tensor:\n return torch.inverse(self.forward())", + "docstring": "Similarity geometric model to be used with ImageRegistrator module for the optimization-based image registration. Args: rotation: if True, the rotation is optimizable, else constant zero. scale: if True, the scale is optimizable, else constant zero. 
shift: if True, the shift is optimizable, else constant one.", + "type": "class", + "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py", + "ast_data": "ClassDef name:Similarity FunctionDef name:__init__ arg:self arg:rotation arg:scale arg:shift arguments arg arg arg arg Call Call If Assign Call Call Call Call If Assign Call Call Call Call If Assign Call Call Call Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:reset_model arg:self arguments arg Call Call Call FunctionDef name:forward arg:self arguments arg Assign Call Assign Call Call Return return:yes FunctionDef name:forward_inverse arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pygame", + "name": "__init__", + "source_code": "def __init__(self, ratio):\n self.ratio = ratio", + "docstring": "create a new collide_rect_ratio callable Ratio is expected to be a floating point value used to scale the underlying sprite rect before checking for collisions.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:ratio arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "path_to_bytes", + "source_code": "def path_to_bytes(path):\n if hasattr(path, '__fspath__'):\n path = path.__fspath__()\n return as_bytes(path)", + "docstring": "Converts input which is a object to . Converts from any python constant representation of a object or to bytes. Args: path: An object that can be converted to path representation. Returns: A object. Usage: In case a simplified version of the path is needed from an object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\compat.py", + "ast_data": "FunctionDef name:path_to_bytes arg:path arguments arg If Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self):\n dmin, dmax = self.axis.get_data_interval()\n return self.tick_values(dmin, dmax)", + "docstring": "Return the locations of the ticks", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:__call__ arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "_check_raw_id_fields", + "source_code": "def _check_raw_id_fields(self, obj):\n if not isinstance(obj.raw_id_fields, (list, tuple)):\n return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001')\n else:\n return list(chain.from_iterable((self._check_raw_id_fields_item(obj, field_name, 'raw_id_fields[%d]' % index) for index, field_name in enumerate(obj.raw_id_fields))))", + "docstring": "Check that only contains field names that are listed on the model.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_raw_id_fields arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_maybe_create_saver", + "source_code": "def _maybe_create_saver(self, saver=None):\n if not saver:\n saver = tf_saver.Saver(variables._all_saveable_objects(), sharded=True, write_version=saver_pb2.SaverDef.V2, allow_empty=True)\n return saver", + "docstring": "Creates a sharded saver if one does not already exist.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py", + "ast_data": "FunctionDef name:_maybe_create_saver arg:self 
arg:saver arguments arg arg If Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "_clone_test_db", + "source_code": "def _clone_test_db(self, suffix, verbosity, keepdb=False):\n raise NotImplementedError(\"The database backend doesn't support cloning databases. Disable the option to run tests in parallel processes.\")", + "docstring": "Internal implementation - duplicate the test db tables.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\creation.py", + "ast_data": "FunctionDef name:_clone_test_db arg:self arg:suffix arg:verbosity arg:keepdb arguments arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "CommunicationImplementation", + "source_code": "@tf_export('distribute.experimental.CommunicationImplementation', 'distribute.experimental.CollectiveCommunication')\nclass CommunicationImplementation(enum.Enum):\n AUTO = 'AUTO'\n RING = 'RING'\n NCCL = 'NCCL'", + "docstring": "Cross device communication implementation. Warning: The alias is deprecated and will be removed in a future version. Use instead. * : Automatically chosen by Tensorflow. * : TensorFlow's ring algorithms for all-reduce and all-gather. * : NVIDIA®'s NCCL library. This is now only used for all-reduce on GPUs; all-reduce on CPU, all-gather and broadcast fallbacks to RING.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py", + "ast_data": "ClassDef name:CommunicationImplementation Assign Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "_autopacking_conversion_function", + "source_code": "def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):\n if as_ref or _should_not_autopack(v):\n return NotImplemented\n inferred_dtype = _get_dtype_from_nested_lists(v)\n if inferred_dtype is None:\n return NotImplemented\n if dtype is None:\n dtype = inferred_dtype\n elif dtype != inferred_dtype:\n v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)\n return _autopacking_helper(v, dtype, name or 'packed')", + "docstring": "Tensor conversion function that automatically packs arguments.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:_autopacking_conversion_function arg:v arg:dtype arg:name arg:as_ref arguments arg arg arg arg If BoolOp Call Return return:yes Assign Call If Compare Return return:yes If Compare Assign If Compare Assign Call Call Return return:yes Call BoolOp" + }, + { + "library": "scipy", + "name": "kaiser_beta", + "source_code": "def kaiser_beta(a):\n if a > 50:\n beta = 0.1102 * (a - 8.7)\n elif a > 21:\n beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)\n else:\n beta = 0.0\n return beta", + "docstring": "Compute the Kaiser parameter , given the attenuation . Parameters ---------- a : float The desired attenuation in the stopband and maximum ripple in the passband, in dB. This should be a *positive* number. Returns ------- beta : float The parameter to be used in the formula for a Kaiser window. References ---------- Oppenheim, Schafer, \"Discrete-Time Signal Processing\", p.475-476. Examples -------- Suppose we want to design a lowpass filter, with 65 dB attenuation in the stop band. 
The Kaiser window parameter to be used in the window method is computed by ``: >>> from scipy.signal import kaiser_beta >>> kaiser_beta(65) 6.20426", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_fir_filter_design.py", + "ast_data": "FunctionDef name:kaiser_beta arg:a arguments arg If Compare Assign If Compare Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "fetch_attr", + "source_code": "@compatibility(is_backward_compatible=True)\ndef fetch_attr(self, target: str):\n target_atoms = target.split('.')\n attr_itr = self.module\n for i, atom in enumerate(target_atoms):\n if not hasattr(attr_itr, atom):\n raise RuntimeError(f'Node referenced nonexistent target {'.'.join(target_atoms[:i + 1])}')\n attr_itr = getattr(attr_itr, atom)\n return attr_itr", + "docstring": "Fetch an attribute from the ``. Args: target (str): The fully-qualified name of the attribute to fetch Return: Any: The value of the attribute.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\interpreter.py", + "ast_data": "FunctionDef name:fetch_attr arg:self arg:target arguments arg arg Assign Call Assign For Call If Call Raise Call Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_get_custom_index_name", + "source_code": "def _get_custom_index_name(self):\n return self.xlabel", + "docstring": "Specify whether xlabel/ylabel should be used to override index name", + "type": "method", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py", + "ast_data": "FunctionDef name:_get_custom_index_name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "broadcast_to", + "source_code": "def broadcast_to(rt_input, shape: DynamicRaggedShape):\n if not isinstance(shape, DynamicRaggedShape):\n raise TypeError('shape must be a DynamicRaggedShape')\n rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)\n origin_shape = None\n if ragged_tensor.is_ragged(rt_input):\n if shape.num_row_partitions != 0:\n if rt_input.row_splits.dtype != shape.dtype:\n raise ValueError('Cannot coerce row_splits.dtype')\n else:\n shape = shape.with_dtype(rt_input.row_splits.dtype)\n origin_shape = DynamicRaggedShape.from_tensor(rt_input)\n elif shape.num_row_partitions != 0:\n origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=shape.dtype)\n else:\n origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=dtypes.int64)\n shape = shape.with_dtype(dtype=dtypes.int64)\n broadcaster = _get_broadcaster(origin_shape, shape)\n return broadcaster.broadcast(rt_input)", + "docstring": "Broadcasts a potentially ragged tensor to a ragged shape. Tiles as necessary to match the given shape. Behavior is undefined if is not broadcast-compatible with . Args: rt_input: The potentially ragged tensor to broadcast. 
shape: A Returns: A potentially ragged tensor whose values are taken from , and whose shape matches .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:broadcast_to arg:rt_input arg:shape arguments arg arg If Call Raise Call Assign Call Assign If Call If Compare If Compare Raise Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "export", + "source_code": "def export(self, name=None):\n with ops.name_scope(name, '%s_lookup_table_export_values' % self.name, [self.resource_handle]):\n with ops.colocate_with(self.resource_handle):\n exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(self.resource_handle, self._key_dtype, self._value_dtype)\n return (exported_keys, exported_values)", + "docstring": "Returns tensors of all keys and values in the table. Args: name: A name for the operation (optional). Returns: A pair of tensors with the first tensor containing all keys and the second tensors containing all values in the table.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:export arg:self arg:name arguments arg arg With Call With Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "obrientransform", + "source_code": "def obrientransform(*args):\n data = argstoarray(*args).T\n v = data.var(axis=0, ddof=1)\n m = data.mean(0)\n n = data.count(0).astype(float)\n data -= m\n data **= 2\n data *= (n - 1.5) * n\n data -= 0.5 * v * (n - 1)\n data /= (n - 1.0) * (n - 2.0)\n if not ma.allclose(v, data.mean(0)):\n raise ValueError('Lack of convergence in obrientransform.')\n return data", + "docstring": "Computes a transform on input data (any number of columns). Used to test for homogeneity of variance prior to running one-way stats. Each array in `f_oneway()` run on the transformed data and found significant, variances are unequal. From Maxwell and Delaney, p.112. Returns: transformed data for use in an ANOVA", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:obrientransform arguments arg Assign Call Assign Call Assign Call Assign Call Call If Call Call Raise Call Return return:yes" + }, + { + "library": "pandas", + "name": "nbytes", + "source_code": "@cache_readonly\ndef nbytes(self) -> int:\n rng = self._range\n return getsizeof(rng) + sum((getsizeof(getattr(rng, attr_name)) for attr_name in ['start', 'stop', 'step']))", + "docstring": "Return the number of bytes in the underlying data.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\range.py", + "ast_data": "FunctionDef name:nbytes arg:self arguments arg Assign Return return:yes Call Call Call Call" + }, + { + "library": "authlib", + "name": "create_endpoint_response", + "source_code": "def create_endpoint_response(self, request):\n client = self.authenticate_endpoint_client(request)\n token = self.authenticate_token(request, client)\n body = self.create_introspection_payload(token)\n return (200, body, default_json_headers)", + "docstring": "Validate introspection request and create the response. 
:returns: (status_code, body, headers)", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7662\\introspection.py", + "ast_data": "FunctionDef name:create_endpoint_response arg:self arg:request arguments arg arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "node_recipients", + "source_code": "def node_recipients(self, node_name, is_control=False, device_name=None):\n if not self._debug_graphs:\n raise LookupError('Node recipients are not loaded from partition graphs yet.')\n device_name = self._infer_device_name(device_name, node_name)\n debug_graph = self._debug_graphs[device_name]\n if is_control:\n return debug_graph.node_ctrl_recipients[node_name]\n else:\n return debug_graph.node_recipients[node_name]", + "docstring": "Get recipient of the given node's output according to partition graphs. Args: node_name: () name of the node. is_control: () whether control outputs, rather than non-control outputs, are to be returned. device_name: () name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: ( of ) all inputs to the node, as a list of node names. Raises: LookupError: If node inputs and control inputs have not been loaded from partition graphs yet.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:node_recipients arg:self arg:node_name arg:is_control arg:device_name arguments arg arg arg arg If Raise Call Assign Call Assign If Return return:yes Return return:yes" + }, + { + "library": "authlib", + "name": "get_jwks", + "source_code": "def get_jwks(self):\n raise NotImplementedError()", + "docstring": "Return the JWKs that will be used to check the JWT access token signature. Developers MUST re-implement this method:: def get_jwks(self): return load_jwks(\"jwks.json\")", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc9068\\revocation.py", + "ast_data": "FunctionDef name:get_jwks arg:self arguments arg Raise Call" + }, + { + "library": "django", + "name": "fetch_returned_insert_rows", + "source_code": "def fetch_returned_insert_rows(self, cursor):\n return cursor.fetchall()", + "docstring": "Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data.", + "type": "method", + "file_path": "django\\django\\db\\backends\\postgresql\\operations.py", + "ast_data": "FunctionDef name:fetch_returned_insert_rows arg:self arg:cursor arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "diff", + "source_code": "@final\ndef diff(self, periods: int=1) -> Index:\n return Index(self.to_series().diff(periods))", + "docstring": "Computes the difference between consecutive values in the Index object. If periods is greater than 1, computes the difference between values that are number of positions apart. Parameters ---------- periods : int, optional The number of positions between the current and previous value to compute the difference with. Default is 1. Returns ------- Index A new Index object with the computed differences. 
Examples -------- >>> import pandas as pd >>> idx = pd.Index([10, 20, 30, 40, 50]) >>> idx.diff() Index([nan, 10.0, 10.0, 10.0, 10.0], dtype='float64')", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:diff arg:self arg:periods arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "sphinx", + "name": "DurationDomain", + "source_code": "class DurationDomain(Domain):\n name = 'duration'\n\n @property\n def reading_durations(self) -> dict[str, float]:\n return self.data.setdefault('reading_durations', {})\n\n def note_reading_duration(self, duration: float) -> None:\n self.reading_durations[self.env.docname] = duration\n\n def clear(self) -> None:\n self.reading_durations.clear()\n\n def clear_doc(self, docname: str) -> None:\n self.reading_durations.pop(docname, None)\n\n def merge_domaindata(self, docnames: Set[str], otherdata: _DurationDomainData) -> None:\n other_reading_durations = otherdata.get('reading_durations', {})\n docnames_set = frozenset(docnames)\n for docname, duration in other_reading_durations.items():\n if docname in docnames_set:\n self.reading_durations[docname] = duration", + "docstring": "A domain for durations of Sphinx processing.", + "type": "class", + "file_path": "sphinx\\sphinx\\ext\\duration.py", + "ast_data": "ClassDef name:DurationDomain Assign FunctionDef name:reading_durations arg:self arguments arg Return return:yes Call FunctionDef name:note_reading_duration arg:self arg:duration arguments arg arg Assign FunctionDef name:clear arg:self arguments arg Call FunctionDef name:clear_doc arg:self arg:docname arguments arg arg Call FunctionDef name:merge_domaindata arg:self arg:docnames arg:otherdata arguments arg arg arg Assign Call Assign Call For Call If Compare Assign" + }, + { + "library": "scikit-learn", + "name": "feature_names_in_", + "source_code": "@property\ndef feature_names_in_(self):\n return self.steps[0][1].feature_names_in_", + "docstring": "Names of features seen during first step method.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:feature_names_in_ arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "deserialize_many_sparse", + "source_code": "@tf_export('io.deserialize_many_sparse', v1=['io.deserialize_many_sparse', 'deserialize_many_sparse'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('deserialize_many_sparse')\ndef deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):\n output_indices, output_values, output_shape = gen_sparse_ops.deserialize_many_sparse(serialized_sparse, dtype, name=name)\n output_indices.set_shape([None, rank])\n output_shape.set_shape([rank])\n return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)", + "docstring": "Deserialize and concatenate from a serialized minibatch. The input must be a string matrix of shape where is the minibatch size and the rows correspond to packed outputs of . The ranks of the original objects must all match. When the final is created, it has rank one higher than the ranks of the incoming objects (they have been concatenated along a new row dimension). The output object's shape values for all dimensions but the first are the max across the input objects' shape values for the corresponding dimensions. Its first shape value is , the minibatch size. The input objects' indices are assumed ordered in standard lexicographic order. 
If this is not the case, after this step run to restore index ordering. For example, if the serialized input is a matrix representing two original objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: serialized_sparse: 2-D of type of shape . The serialized and packed objects. dtype: The of the serialized objects. rank: (optional) Python int, the rank of the objects. name: A name prefix for the returned tensors (optional) Returns: A representing the deserialized s, concatenated along the s' first dimension. All of the serialized s must have had the same rank and type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:deserialize_many_sparse arg:serialized_sparse arg:dtype arg:rank arg:name arguments arg arg arg arg Assign Call Call Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "embedding_tables", + "source_code": "@property\ndef embedding_tables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:\n self._maybe_build()\n return {stacked_table_name: self._variables[stacked_table_name]['parameters'] for stacked_table_name in self._stacked_table_to_tables}", + "docstring": "Returns a dict of embedding tables, keyed by .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:embedding_tables arg:self arguments arg Call Return return:yes" + }, + { + "library": "scipy", + "name": "write_element", + "source_code": "def write_element(self, arr, mdtype=None):\n if mdtype is None:\n mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]\n if arr.dtype.byteorder == swapped_code:\n arr = arr.byteswap().view(arr.dtype.newbyteorder())\n byte_count = arr.size * arr.itemsize\n if byte_count <= 4:\n self.write_smalldata_element(arr, mdtype, byte_count)\n else:\n self.write_regular_element(arr, mdtype, byte_count)", + "docstring": "write tag and data", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", + "ast_data": "FunctionDef name:write_element arg:self arg:arr arg:mdtype arguments arg arg arg If Compare Assign If Compare Assign Call Call Call Assign If Compare Call Call" + }, + { + "library": "pytorch", + "name": "set_deterministic_debug_mode", + "source_code": "def set_deterministic_debug_mode(debug_mode: _Union[builtins.int, str]) -> None:\n if not isinstance(debug_mode, (builtins.int, str)):\n raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}')\n if isinstance(debug_mode, str):\n if debug_mode == 'default':\n debug_mode = 0\n elif debug_mode == 'warn':\n debug_mode = 1\n elif debug_mode == 'error':\n debug_mode = 2\n else:\n raise RuntimeError(f'invalid value of debug_mode, expected one of `default`, `warn`, `error`, but got {debug_mode}')\n if debug_mode == 0:\n _C._set_deterministic_algorithms(False)\n elif debug_mode == 1:\n _C._set_deterministic_algorithms(True, warn_only=True)\n elif debug_mode == 2:\n _C._set_deterministic_algorithms(True)\n else:\n raise RuntimeError(f'invalid value of debug_mode, expected 0, 1, or 2, but got {debug_mode}')", + "docstring": "Sets the debug mode for deterministic operations. .. note:: This is an alternative interface for :func:. Refer to that function's documentation for details about affected operations. 
Args: debug_mode(str or int): If \"default\" or 0, don't error or warn on nondeterministic operations. If \"warn\" or 1, warn on nondeterministic operations. If \"error\" or 2, error on nondeterministic operations.", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:set_deterministic_debug_mode arg:debug_mode arguments arg If Call Raise Call Call If Call If Compare Assign If Compare Assign If Compare Assign Raise Call If Compare Call If Compare Call If Compare Call Raise Call" + }, + { + "library": "scipy", + "name": "_cumulative_simpson_unequal_intervals", + "source_code": "def _cumulative_simpson_unequal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray:\n x21 = dx[..., :-1]\n x32 = dx[..., 1:]\n f1 = y[..., :-2]\n f2 = y[..., 1:-1]\n f3 = y[..., 2:]\n x31 = x21 + x32\n x21_x31 = x21 / x31\n x21_x32 = x21 / x32\n x21x21_x31x32 = x21_x31 * x21_x32\n coeff1 = 3 - x21_x31\n coeff2 = 3 + x21x21_x31x32 + x21_x31\n coeff3 = -x21x21_x31x32\n return x21 / 6 * (coeff1 * f1 + coeff2 * f2 + coeff3 * f3)", + "docstring": "Calculate the Simpson integrals for all h1 intervals assuming unequal interval widths. The function can also be used to calculate the integral for all h2 intervals by reversing the inputs, and .", + "type": "function", + "file_path": "scipy\\scipy\\integrate\\_quadrature.py", + "ast_data": "FunctionDef name:_cumulative_simpson_unequal_intervals arg:y arg:dx arguments arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_array_indexing", + "source_code": "def _array_indexing(array, key, key_dtype, axis):\n xp, is_array_api = get_namespace(array)\n if is_array_api:\n return xp.take(array, key, axis=axis)\n if issparse(array) and key_dtype == 'bool':\n key = np.asarray(key)\n if isinstance(key, tuple):\n key = list(key)\n return array[key, ...] if axis == 0 else array[:, key]", + "docstring": "Index an array or scipy.sparse consistently across NumPy version.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_indexing.py", + "ast_data": "FunctionDef name:_array_indexing arg:array arg:key arg:key_dtype arg:axis arguments arg arg arg arg Assign Call If Return return:yes Call If BoolOp Call Compare Assign Call If Call Assign Call Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "_stride_vars", + "source_code": "def _stride_vars(self, index: Expr, vars: Sequence[sympy.Symbol], support_vars: Sequence[sympy.Symbol]) -> list[Expr]:\n strides = []\n index = self.simplify(index)\n index = index - sympy_subs(index, {v: sympy.S.Zero for v in support_vars if v != 0})\n for i in range(len(vars)):\n index_dim = sympy_subs(index, {support_vars[j]: sympy.S.Zero for j in range(len(support_vars)) if vars[i] != support_vars[j] and support_vars[j] != 0})\n v = vars[i]\n if v == 0:\n strides.append(sympy.S.Zero)\n else:\n strides.append(sympy_subs(index_dim, {v: sympy.S.One}) - sympy_subs(index_dim, {v: sympy.S.Zero}))\n return strides", + "docstring": "Convert an indexing expression back into strides NOTE: This is only valid if the index is a standard strided offset calculation. e.g. 
10 * ModularIndexing(i0 + 1, 1, 2) would give a stride of -10 because the index wraps around after the first element", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\sizevars.py", + "ast_data": "FunctionDef name:_stride_vars arg:self arg:index arg:vars arg:support_vars arguments arg arg arg arg Assign Assign Call Assign Call Compare For Call Call Assign Call Call Call BoolOp Compare Compare Assign If Compare Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "DistributionCombination", + "source_code": "class DistributionCombination(combinations_lib.TestCombination):\n\n def should_execute_combination(self, kwargs):\n distributions = [v for v in kwargs.values() if isinstance(v, NamedDistribution)]\n if test_util.is_xla_enabled() and any((d.no_xla for d in distributions)):\n return (False, 'n/a: skipping strategy combination with no_xla=True in XLA tests')\n return (True, None)\n\n def parameter_modifiers(self):\n return [DistributionParameter(), combinations_lib.OptionalParameter('use_var_policy')]", + "docstring": "Sets up distribution strategy for tests.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py", + "ast_data": "ClassDef name:DistributionCombination FunctionDef name:should_execute_combination arg:self arg:kwargs arguments arg arg Assign Call Call If BoolOp Call Call Return return:yes Return return:yes FunctionDef name:parameter_modifiers arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "tonative", + "source_code": "def tonative(n, encoding='ISO-8859-1'):\n if isinstance(n, bytes):\n return n.decode(encoding)\n return n", + "docstring": "Return the given string as a native string in the given encoding.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\_cpcompat.py", + "ast_data": "FunctionDef name:tonative arg:n arg:encoding arguments arg arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "scipy", + "name": "LimitedParamBenchmark", + "source_code": "class LimitedParamBenchmark(Benchmark):\n num_param_combinations = 0\n\n def setup(self, *args, **kwargs):\n slow = is_xslow()\n if slow:\n return\n param_seed = kwargs.pop('param_seed', None)\n if param_seed is None:\n param_seed = 1\n params = kwargs.pop('params', None)\n if params is None:\n params = self.params\n num_param_combinations = kwargs.pop('num_param_combinations', None)\n if num_param_combinations is None:\n num_param_combinations = self.num_param_combinations\n all_choices = list(itertools.product(*params))\n rng = random.Random(param_seed)\n rng.shuffle(all_choices)\n active_choices = all_choices[:num_param_combinations]\n if args not in active_choices:\n raise NotImplementedError('skipped')", + "docstring": "Limits parameter combinations to choices, chosen pseudo-randomly with fixed seed. 
Raises NotImplementedError (skip) if not in active set.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\common.py", + "ast_data": "ClassDef name:LimitedParamBenchmark Assign FunctionDef name:setup arg:self arguments arg arg arg Assign Call If Return return:no Assign Call If Compare Assign Assign Call If Compare Assign Assign Call If Compare Assign Assign Call Call Assign Call Call Assign If Compare Raise Call" + }, + { + "library": "cherrypy", + "name": "start", + "source_code": "def start(self):\n opts = ''.join([' PythonOption %s %s\\n' % (k, v) for k, v in self.opts])\n conf_data = self.template % {'port': self.port, 'loc': self.loc, 'opts': opts, 'handler': self.handler}\n mpconf = os.path.join(os.path.dirname(__file__), 'cpmodpy.conf')\n with open(mpconf, 'wb') as f:\n f.write(conf_data)\n response = read_process(self.apache_path, '-k start -f %s' % mpconf)\n self.ready = True\n return response", + "docstring": "Start an Apache2/httpd server.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpmodpy.py", + "ast_data": "FunctionDef name:start arg:self arguments arg Assign Call Assign Assign Call Call With Call Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "DistributedVariableTraceType", + "source_code": "class DistributedVariableTraceType(trace.TraceType):\n\n def __init__(self, distributed_variable):\n self.distributed_variable = distributed_variable\n self.components = (tuple(distributed_variable.shape.as_list()), distributed_variable.dtype)\n\n def is_subtype_of(self, other):\n return self == other\n\n def most_specific_common_supertype(self, others):\n return self if all((self == other for other in others)) else None\n\n def placeholder_value(self, placeholder_context=None):\n return self.distributed_variable\n\n def to_tensors(self, value):\n return []\n\n def cast(self, value, _):\n return value\n\n def __hash__(self) -> int:\n return hash(self.components)\n\n def __eq__(self, other) -> bool:\n if not isinstance(other, DistributedVariableTraceType):\n return False\n return self.components == other.components", + "docstring": "TraceType of DistributedVariable objects.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "ClassDef name:DistributedVariableTraceType FunctionDef name:__init__ arg:self arg:distributed_variable arguments arg arg Assign Assign Call Call FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg Return return:yes Call Compare FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg Return return:yes FunctionDef name:to_tensors arg:self arg:value arguments arg arg Return return:no FunctionDef name:cast arg:self arg:value arg:_ arguments arg arg arg Return return:yes FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare" + }, + { + "library": "pandas", + "name": "read_sql_table", + "source_code": "def read_sql_table(table_name: str, con, schema: str | None=None, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None=None, columns: list[str] | None=None, chunksize: int | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default) -> DataFrame | 
Iterator[DataFrame]:\n check_dtype_backend(dtype_backend)\n if dtype_backend is lib.no_default:\n dtype_backend = 'numpy'\n assert dtype_backend is not lib.no_default\n with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:\n if not pandas_sql.has_table(table_name):\n raise ValueError(f'Table {table_name} not found')\n table = pandas_sql.read_table(table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, dtype_backend=dtype_backend)\n if table is not None:\n return table\n else:\n raise ValueError(f'Table {table_name} not found', con)", + "docstring": "Read SQL database table into a DataFrame. Given a table name and a SQLAlchemy connectable, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- table_name : str Name of SQL table in database. con : SQLAlchemy connectable or str A database URI could be provided as str. SQLite DBAPI connection mode not supported. schema : str, default None Name of SQL schema in database to query (if database flavor supports this). Uses default schema if None (default). index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of Precision. parse_dates : list or dict, default None - List of column names to parse as dates. - Dict of `pandas.to_datetimechunksizeDataFrameDataFrameArrowDtypeDataFrame` .. versionadded:: 2.0 Returns ------- DataFrame or Iterator[DataFrame] A SQL table is returned as two-dimensional data structure with labeled axes. See Also -------- read_sql_query : Read SQL query into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information will be converted to UTC. 
Examples -------- >>> pd.read_sql_table(\"table_name\", \"postgres:///db_name\") # doctest:+SKIP", + "type": "function", + "file_path": "pandas\\pandas\\io\\sql.py", + "ast_data": "FunctionDef name:read_sql_table arg:table_name arg:con arg:schema arg:index_col arg:coerce_float arg:parse_dates arg:columns arg:chunksize arg:dtype_backend arguments arg arg arg arg arg arg arg arg arg Call If Compare Assign Compare With Call If Call Raise Call Assign Call If Compare Return return:yes Raise Call" + }, + { + "library": "numpy", + "name": "_div", + "source_code": "def _div(mul_f, c1, c2):\n [c1, c2] = as_series([c1, c2])\n if c2[-1] == 0:\n raise ZeroDivisionError\n lc1 = len(c1)\n lc2 = len(c2)\n if lc1 < lc2:\n return (c1[:1] * 0, c1)\n elif lc2 == 1:\n return (c1 / c2[-1], c1[:1] * 0)\n else:\n quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)\n rem = c1\n for i in range(lc1 - lc2, -1, -1):\n p = mul_f([0] * i + [1], c2)\n q = rem[-1] / p[-1]\n rem = rem[:-1] - q * p[:-1]\n quo[i] = q\n return (quo, trimseq(rem))", + "docstring": "Helper function used to implement the `` functions for more detail", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polyutils.py", + "ast_data": "FunctionDef name:_div arg:mul_f arg:c1 arg:c2 arguments arg arg arg Assign Call If Compare Raise Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Call Assign For Call Assign Call Assign Assign Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_compare_prepare_convert_qconfig_mappings", + "source_code": "def _compare_prepare_convert_qconfig_mappings(prepare_qconfig_mapping: QConfigMapping, convert_qconfig_mapping: QConfigMapping):\n assert qconfig_equals(prepare_qconfig_mapping.global_qconfig, convert_qconfig_mapping.global_qconfig), 'Expected global qconfigs to be the same in the prepare and convert quantization configs'\n prepare_dicts: list[OrderedDict] = [prepare_qconfig_mapping.object_type_qconfigs, prepare_qconfig_mapping.module_name_qconfigs, prepare_qconfig_mapping.module_name_regex_qconfigs]\n convert_dicts: list[OrderedDict] = [convert_qconfig_mapping.object_type_qconfigs, convert_qconfig_mapping.module_name_qconfigs, convert_qconfig_mapping.module_name_regex_qconfigs]\n dict_names = [_OBJECT_TYPE_DICT_KEY, _MODULE_NAME_DICT_KEY, _MODULE_NAME_REGEX_DICT_KEY]\n for i in range(len(prepare_dicts)):\n for name in prepare_dicts[i].keys():\n assert name in convert_dicts[i], f'Missing key {dict_names[i]} {name} in convert QConfigMapping when it was present in prepare'\n assert convert_dicts[i][name] is None or qconfig_equals(prepare_dicts[i][name], convert_dicts[i][name]), f'Expected convert QConfigMapping to have the same qconfig as prepare for key {dict_names[i]} {name}; prepare: {prepare_dicts[i][name]}; convert: {convert_dicts[i][name]}'", + "docstring": "Compare the qconfig_mapping passed in convert to the one from prepare and check the values Args: : configuration for prepare quantization step : configuration for convert quantization step", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\qconfig_mapping_utils.py", + "ast_data": "FunctionDef name:_compare_prepare_convert_qconfig_mappings arg:prepare_qconfig_mapping arg:convert_qconfig_mapping arguments arg arg Call Assign For Call Call For Call Compare BoolOp Compare Call" + }, + { + "library": "tensorflow", + "name": "add_variable", + "source_code": "def add_variable(trackable, name, shape=None, dtype=dtypes.float32, initializer=None, trainable=True):\n return 
trackable._add_variable_with_custom_getter(name=name, shape=shape, dtype=dtype, initializer=initializer, getter=_default_getter, trainable=trainable)", + "docstring": "Add a variable to a Trackable with no scope influence.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:add_variable arg:trackable arg:name arg:shape arg:dtype arg:initializer arg:trainable arguments arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "mode", + "source_code": "def mode(a, axis=0):\n return _mode(a, axis=axis, keepdims=True)", + "docstring": "Returns an array of the modal (most common) value in the passed array. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array . Returns ------- mode : ndarray Array of modal values. count : ndarray Array of counts for each mode. Notes ----- For more details, see . Examples -------- >>> import numpy as np >>> from scipy import stats >>> from scipy.stats import mstats >>> m_arr = np.ma.array([1, 1, 0, 0, 0, 0], mask=[0, 0, 1, 1, 1, 0]) >>> mstats.mode(m_arr) # note that most zeros are masked ModeResult(mode=array([1.]), count=array([2.]))", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:mode arg:a arg:axis arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "enable_fake_mode", + "source_code": "@contextlib.contextmanager\ndef enable_fake_mode():\n from torch._subclasses import fake_tensor\n from torch.fx.experimental.symbolic_shapes import ShapeEnv\n fake_mode = fake_tensor.FakeTensorMode(allow_non_fake_inputs=not torch._guards.detect_fake_mode(), shape_env=ShapeEnv(allow_scalar_outputs=False, allow_dynamic_output_shape_ops=False))\n patcher_context = patcher.ONNXTorchPatcher()\n fake_context = ONNXFakeContext(fake_mode=fake_mode)\n with fake_mode, patcher_context:\n yield fake_context\n fake_context.state_dict_paths = tuple(patcher_context.paths)", + "docstring": "Enable fake mode for the duration of the context. Internally it instantiates a :class: context manager that converts user input and model parameters into :class:. A :class: is a :class: with the ability to run PyTorch code without having to actually do computation through tensors allocated on a `` device. Because there is no actual data being allocated on the device, this API allows for initializing and exporting large models without the actual memory footprint needed for executing it. It is highly recommended to initialize the model in fake mode when exporting models that are too large to fit into memory. .. note:: This function does not support torch.onnx.export(..., dynamo=True, optimize=True). Please call ONNXProgram.optimize() outside of the function after the model is exported. Example:: # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) >>> import torch >>> class MyModel(torch.nn.Module): # Model with a parameter ... def __init__(self) -> None: ... super().__init__() ... self.weight = torch.nn.Parameter(torch.tensor(42.0)) ... def forward(self, x): ... return self.weight + x >>> with torch.onnx.enable_fake_mode(): ... # When initialized in fake mode, the model's parameters are fake tensors ... # They do not take up memory so we can initialize large models ... my_nn_module = MyModel() ... 
arg1 = torch.randn(2, 2, 2) >>> onnx_program = torch.onnx.export(my_nn_module, (arg1,), dynamo=True, optimize=False) >>> # Saving model WITHOUT initializers (only the architecture) >>> onnx_program.save( ... \"my_model_without_initializers.onnx\", ... include_initializers=False, ... keep_initializers_as_inputs=True, ... ) >>> # Saving model WITH initializers after applying concrete weights >>> onnx_program.apply_weights({\"weight\": torch.tensor(42.0)}) >>> onnx_program.save(\"my_model_with_initializers.onnx\") .. warning:: This API is experimental and is *NOT* backward-compatible.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py", + "ast_data": "FunctionDef name:enable_fake_mode arguments Assign Call Call Call Assign Call Assign Call With Assign Call" + }, + { + "library": "pytorch", + "name": "plan", + "source_code": "def plan(self, state: MemoryPlanningState) -> MemoryPlanningLine:\n return self", + "docstring": "First pass to find reuse", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py", + "ast_data": "FunctionDef name:plan arg:self arg:state arguments arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "module", + "source_code": "def module(self) -> torch.nn.Module:\n from ._unlift import _unlift_exported_program_lifted_states\n module = _unlift_exported_program_lifted_states(self)\n\n def _train(self, mode: bool=True):\n raise NotImplementedError('Calling train() is not supported yet.')\n\n def _eval(self, mode: bool=True):\n raise NotImplementedError('Calling eval() is not supported yet.')\n module.train = types.MethodType(_train, module)\n module.eval = types.MethodType(_eval, module)\n return module", + "docstring": "Returns a self contained GraphModule with all the parameters/buffers inlined.", + "type": "method", + "file_path": "pytorch\\torch\\export\\exported_program.py", + "ast_data": "FunctionDef name:module arg:self arguments arg Assign Call FunctionDef name:_train arg:self arg:mode arguments arg arg Raise Call FunctionDef name:_eval arg:self arg:mode arguments arg arg Raise Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_process_parameters", + "source_code": "def _process_parameters(self, dim):\n if dim is None or not np.isscalar(dim) or dim < 1 or (dim != int(dim)):\n raise ValueError('Dimension of vector must be specified, and must be an integer greater than 0.')\n return int(dim)", + "docstring": "Dimension N must be specified; it cannot be inferred.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_make_graph_execution_function", + "source_code": "def _make_graph_execution_function(model, mode):\n\n def _per_replica_function(model):\n f = model._make_execution_function(mode)\n return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)\n strategy = model._distribution_strategy\n with strategy.scope():\n grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args = strategy.extended.call_for_each_replica(_per_replica_function, args=(get_distributed_model(model, mode),))\n init_restore_or_wait_for_variables()\n all_inputs, all_outputs, all_updates, all_session_args = unwrap_values(strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args, with_loss_tensor=mode 
!= ModeKeys.PREDICT)\n return backend.function(all_inputs, all_outputs, updates=all_updates, name='distributed_{}_function'.format(mode), **all_session_args)", + "docstring": "Makes function to run one step of distributed model in graph mode.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", + "ast_data": "FunctionDef name:_make_graph_execution_function arg:model arg:mode arguments arg arg FunctionDef name:_per_replica_function arg:model arguments arg Assign Call Return return:yes Assign With Call Assign Call Call Call Assign Call Compare Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "call_preflattened", + "source_code": "def call_preflattened(self, args: Sequence[core.Tensor]) -> Any:\n flat_outputs = self.call_flat(*args)\n return self.function_type.pack_output(flat_outputs)", + "docstring": "Calls with flattened tensor inputs and returns the structured output.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py", + "ast_data": "FunctionDef name:call_preflattened arg:self arg:args arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "grab_frame", + "source_code": "@abc.abstractmethod\ndef grab_frame(self, **savefig_kwargs):\n pass", + "docstring": "Grab the image information from the figure and save as a movie frame. All keyword arguments in *savefig_kwargs* are passed on to the call that saves the figure. However, several keyword arguments that are supported by may not be passed as they are controlled by the MovieWriter: - *dpi*, *bbox_inches*: These may not be passed because each frame of the animation much be exactly the same size in pixels. - *format*: This is controlled by the MovieWriter.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:grab_frame arg:self arguments arg arg" + }, + { + "library": "django", + "name": "order_by", + "source_code": "def order_by(self, *field_names):\n if self.query.is_sliced:\n raise TypeError('Cannot reorder a query once a slice has been taken.')\n obj = self._chain()\n obj.query.clear_ordering(force=True, clear_default=False)\n obj.query.add_ordering(*field_names)\n return obj", + "docstring": "Return a new QuerySet instance with the ordering changed.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:order_by arg:self arguments arg arg If Raise Call Assign Call Call Call Return return:yes" + }, + { + "library": "virtualenv", + "name": "Activator", + "source_code": "class Activator(ABC):\n\n def __init__(self, options) -> None:\n self.flag_prompt = os.path.basename(os.getcwd()) if options.prompt == '.' 
else options.prompt\n\n @classmethod\n def supports(cls, interpreter):\n return True\n\n @classmethod\n def add_parser_arguments(cls, parser, interpreter):\n pass\n\n @abstractmethod\n def generate(self, creator):\n raise NotImplementedError", + "docstring": "Generates activate script for the virtual environment.", + "type": "class", + "file_path": "virtualenv\\src\\virtualenv\\activation\\activator.py", + "ast_data": "ClassDef name:Activator FunctionDef name:__init__ arg:self arg:options arguments arg arg Assign Compare Call Call FunctionDef name:supports arg:cls arg:interpreter arguments arg arg Return return:yes FunctionDef name:add_parser_arguments arg:cls arg:parser arg:interpreter arguments arg arg arg FunctionDef name:generate arg:self arg:creator arguments arg arg Raise" + }, + { + "library": "matplotlib", + "name": "disable_mouse_rotation", + "source_code": "def disable_mouse_rotation(self):\n self.mouse_init(rotate_btn=[], pan_btn=[], zoom_btn=[])", + "docstring": "Disable mouse buttons for 3D rotation, panning, and zooming.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:disable_mouse_rotation arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "RendezvousConnectionError", + "source_code": "class RendezvousConnectionError(RendezvousError):\n pass", + "docstring": "Raised when the connection to a rendezvous backend has failed.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", + "ast_data": "ClassDef name:RendezvousConnectionError" + }, + { + "library": "pytorch", + "name": "_get_logger_dict_helper", + "source_code": "def _get_logger_dict_helper(mod: nn.Module, target_dict: dict[str, Any], prefix: str='') -> None:\n\n def get_prefix(prefix):\n return prefix if prefix == '' else prefix + '.'\n for name, child in mod.named_children():\n if isinstance(child, Logger):\n target_dict[get_prefix(prefix) + 'stats'] = child.stats\n break\n for name, child in mod.named_children():\n module_prefix = get_prefix(prefix) + name if prefix else name\n _get_logger_dict_helper(child, target_dict, module_prefix)", + "docstring": "This is the helper function for get_logger_dict Args: mod: module we want to save all logger stats prefix: prefix for the current module target_dict: the dictionary used to save all logger stats", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py", + "ast_data": "FunctionDef name:_get_logger_dict_helper arg:mod arg:target_dict arg:prefix arguments arg arg arg FunctionDef name:get_prefix arg:prefix arguments arg Return return:yes Compare For Call If Call Assign Call For Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "WrapperFxCodegen", + "source_code": "class WrapperFxCodegen(PythonWrapperCodegen):\n supports_caching = False\n\n def _generate(self, is_inference: bool) -> tuple[FileBackedGraphModule, None]:\n self.run_wrapper_ir_passes(is_inference)\n prologue = '\\n'.join([self.imports.getvalue(), self.header.getvalue()])\n gm = FxConverter(lines=self.lines, prologue=prologue).generate()\n compiled_fn = self.compile_graph(gm)\n return (FileBackedGraphModule(gm, compiled_fn), None)\n\n def compile_graph(self, gm: GraphModule) -> Callable[..., Any]:\n return gm.forward\n\n @classmethod\n def create(cls, is_subgraph: bool, subgraph_name: Optional[str], parent_wrapper: Optional[PythonWrapperCodegen], partition_signatures: Optional[ir.GraphPartitionSignature]=None) -> 'WrapperFxCodegen':\n if 
is_subgraph:\n raise NotImplementedError('Subgraphs are not yet supported by FX conversion')\n return cls()", + "docstring": "Backend to generate wrapper code as an FX IR graph.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py", + "ast_data": "ClassDef name:WrapperFxCodegen Assign FunctionDef name:_generate arg:self arg:is_inference arguments arg arg Call Assign Call Call Call Assign Call Call Assign Call Return return:yes Call FunctionDef name:compile_graph arg:self arg:gm arguments arg arg Return return:yes FunctionDef name:create arg:cls arg:is_subgraph arg:subgraph_name arg:parent_wrapper arg:partition_signatures arguments arg arg arg arg arg If Raise Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "decision_function", + "source_code": "@available_if(_estimator_has('decision_function'))\ndef decision_function(self, X):\n check_is_fitted(self)\n return self.estimator_.decision_function(self.transform(X))", + "docstring": "Compute the decision function of `classes_`. Regression and binary classification produce an array of shape [n_samples].", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py", + "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Return return:yes Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "get_width", + "source_code": "def get_width(self):\n return self._width", + "docstring": "Return the width (thickness) of the annulus ring.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_width arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_transform", + "source_code": "def get_transform(self):\n return self._transform", + "docstring": "Return the associated with this scale.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "worker_count", + "source_code": "def worker_count(cluster_spec, task_type):\n _validate_cluster_spec(cluster_spec, task_type, task_id=0)\n cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()\n if task_type not in ['chief', 'worker', 'evaluator']:\n raise ValueError('Unexpected `task_type` %r' % task_type)\n if task_type == 'evaluator':\n return len(cluster_spec['evaluator'])\n else:\n return len(cluster_spec.get('chief', [])) + len(cluster_spec.get('worker', []))", + "docstring": "Returns the number of workers in the cluster.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py", + "ast_data": "FunctionDef name:worker_count arg:cluster_spec arg:task_type arguments arg arg Call Assign Call Call If Compare Raise Call If Compare Return return:yes Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "num_replicas_in_sync", + "source_code": "@property\ndef num_replicas_in_sync(self):\n return self._extended._num_replicas_in_sync", + "docstring": "Returns number of replicas over which gradients are aggregated.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:num_replicas_in_sync arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "has_multiple_dim_order", + "source_code": "def has_multiple_dim_order(tensor):\n sizes = 
tensor.size()\n strides = tensor.stride()\n has_duplicate_strides = any((earlier == later for earlier, later in zip(strides, strides[1:])))\n has_singleton_dims = any((size == 1 for size in sizes))\n return has_duplicate_strides or has_singleton_dims", + "docstring": "Returns True if there're multiple legal dim orders for given tensor, False otherwise. The tensor is considered to have multiple legal dim orders if either of the following conditions is met: * Singleton Dimensions: There's at least one singleteon dimension in the tensor. Since their size is 1, they don't affect the memory offset (stride * index is zero because index is always zero). Therefore, they can be placed anywhere in the dimension order without changing how data is accessed. * Same strides: Strides reflect how the tensor is stored in memory. If any two dimensions have the same stride, swapping these dimensions won't change how data is accessed, leading to multiple correct dimension orders.", + "type": "method", + "file_path": "pytorch\\torch\\_tensor.py", + "ast_data": "FunctionDef name:has_multiple_dim_order arg:tensor arguments arg Assign Call Assign Call Assign Call Compare Call Assign Call Compare Return return:yes BoolOp" + }, + { + "library": "tensorflow", + "name": "_list_node_dumps", + "source_code": "def _list_node_dumps(self, node_name):\n lines = []\n font_attr_segs = {}\n watch_keys = self._debug_dump.debug_watch_keys(node_name)\n dump_count = 0\n for watch_key in watch_keys:\n debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)\n for datum in debug_tensor_data:\n line = ' Slot %d @ %s @ %.3f ms' % (datum.output_slot, datum.debug_op, (datum.timestamp - self._debug_dump.t0) / 1000.0)\n lines.append(line)\n command = 'pt %s:%d -n %d' % (node_name, datum.output_slot, dump_count)\n font_attr_segs[len(lines) - 1] = [(2, len(line), debugger_cli_common.MenuItem(None, command))]\n dump_count += 1\n output = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)\n output_with_header = debugger_cli_common.RichTextLines(['%d dumped tensor(s):' % dump_count, ''])\n output_with_header.extend(output)\n return output_with_header", + "docstring": "List dumped tensor data from a node. Args: node_name: Name of the node of which the attributes are to be listed. Returns: A RichTextLines object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py", + "ast_data": "FunctionDef name:_list_node_dumps arg:self arg:node_name arguments arg arg Assign Assign Assign Call Assign For Assign Call For Assign Call Assign Assign Call Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "compute_correspond_epilines", + "source_code": "def compute_correspond_epilines(points: Tensor, F_mat: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points, ['*', 'N', 'DIM'])\n if points.shape[-1] == 2:\n points_h: Tensor = convert_points_to_homogeneous(points)\n elif points.shape[-1] == 3:\n points_h = points\n else:\n raise AssertionError(points.shape)\n KORNIA_CHECK_SHAPE(F_mat, ['*', '3', '3'])\n points_h = torch.transpose(points_h, dim0=-2, dim1=-1)\n a, b, c = torch.chunk(F_mat @ points_h, dim=-2, chunks=3)\n nu: Tensor = a * a + b * b\n nu = where(nu > 0.0, 1.0 / torch.sqrt(nu), torch.ones_like(nu))\n line = torch.cat([a * nu, b * nu, c * nu], dim=-2)\n return torch.transpose(line, dim0=-2, dim1=-1)", + "docstring": "Compute the corresponding epipolar line for a given set of points. 
Args: points: tensor containing the set of points to project in the shape of :math: or :math:. F_mat: the fundamental to use for projection the points in the shape of :math:. Returns: a tensor with shape :math: containing a vector of the epipolar lines corresponding to the points to the other image. Each line is described as :math: and encoding the vectors as :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py", + "ast_data": "FunctionDef name:compute_correspond_epilines arg:points arg:F_mat arguments arg arg Call If Compare Call If Compare Assign Raise Call Call Assign Call Assign Call Assign Call Compare Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "average_pooling3d", + "source_code": "def average_pooling3d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n warnings.warn('`tf.layers.average_pooling3d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.AveragePooling3D` instead.')\n layer = AveragePooling3D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n return layer.apply(inputs)", + "docstring": "Average pooling layer for 3D inputs (e.g. volumes). Args: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. (default) and are supported. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py", + "ast_data": "FunctionDef name:average_pooling3d arg:inputs arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "eye_array", + "source_code": "def eye_array(m, n=None, *, k=0, dtype=float, format=None):\n return _eye(m, n, k, dtype, format)", + "docstring": "Sparse array of chosen shape with ones on the kth diagonal and zeros elsewhere. Return a sparse array with ones on diagonal. Specifically a sparse array (m x n) where the kth diagonal is all ones and everything else is zeros. Parameters ---------- m : int Number of rows requested. n : int, optional Number of columns. Default: . k : int, optional Diagonal to place ones on. Default: 0 (main diagonal). dtype : dtype, optional Data type of the array format : str, optional (default: \"dia\") Sparse format of the result, e.g., format=\"csr\", etc. Returns ------- new_array : sparse array Sparse array of chosen shape with ones on the kth diagonal and zeros elsewhere. 
Examples -------- >>> import numpy as np >>> import scipy as sp >>> sp.sparse.eye_array(3).toarray() array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> sp.sparse.eye_array(3, dtype=np.int8)", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_construct.py", + "ast_data": "FunctionDef name:eye_array arg:m arg:n arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "latest_checkpoint", + "source_code": "@property\ndef latest_checkpoint(self):\n return self._latest_checkpoint", + "docstring": "The prefix of the most recent checkpoint in . Equivalent to where is the constructor argument to . Suitable for passing to to resume training. Returns: The checkpoint prefix. If there are no checkpoints, returns .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py", + "ast_data": "FunctionDef name:latest_checkpoint arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "pct_change", + "source_code": "@final\n@Substitution(name='groupby')\n@Substitution(see_also=_common_see_also)\ndef pct_change(self, periods: int=1, fill_method: None=None, freq=None):\n if fill_method is not None:\n raise ValueError(f'fill_method must be None; got fill_method={fill_method!r}.')\n if freq is not None:\n f = lambda x: x.pct_change(periods=periods, freq=freq, axis=0)\n return self._python_apply_general(f, self._selected_obj, is_transform=True)\n if fill_method is None:\n op = 'ffill'\n else:\n op = fill_method\n filled = getattr(self, op)(limit=0)\n fill_grp = filled.groupby(self._grouper.codes, group_keys=self.group_keys)\n shifted = fill_grp.shift(periods=periods, freq=freq)\n return filled / shifted - 1", + "docstring": "Calculate pct_change of each value to previous entry in group. Parameters ---------- periods : int, default 1 Periods to shift for calculating percentage change. Comparing with a period of 1 means adjacent elements are compared, whereas a period of 2 compares every other element. fill_method : None Must be None. This argument will be removed in a future version of pandas. .. deprecated:: 2.1 All options of are deprecated except . freq : str, pandas offset object, or None, default None The frequency increment for time series data (e.g., 'M' for month-end). If None, the frequency is inferred from the index. Relevant for time series data only. Returns ------- Series or DataFrame Percentage changes within each group. %(see_also)s Examples -------- For SeriesGroupBy: >>> lst = [\"a\", \"a\", \"b\", \"b\"] >>> ser = pd.Series([1, 2, 3, 4], index=lst) >>> ser a 1 a 2 b 3 b 4 dtype: int64 >>> ser.groupby(level=0).pct_change() a NaN a 1.000000 b NaN b 0.333333 dtype: float64 For DataFrameGroupBy: >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]] >>> df = pd.DataFrame( ... data, ... columns=[\"a\", \"b\", \"c\"], ... index=[\"tuna\", \"salmon\", \"catfish\", \"goldfish\"], ... 
) >>> df a b c tuna 1 2 3 salmon 1 5 6 catfish 2 5 8 goldfish 2 6 9 >>> df.groupby(\"a\").pct_change() b c tuna NaN NaN salmon 1.5 1.000 catfish NaN NaN goldfish 0.2 0.125", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:pct_change arg:self arg:periods arg:fill_method arg:freq arguments arg arg arg arg If Compare Raise Call If Compare Assign arguments arg Call Return return:yes Call If Compare Assign Assign Assign Call Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "_reorder_columns", + "source_code": "def _reorder_columns(self, res, data):\n cols = [c for c in data if c in res]\n cols += [c for c in res if c not in data]\n return res.reindex(columns=pd.Index(cols))", + "docstring": "Reorder result columns to match original order with new columns appended.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\groupby.py", + "ast_data": "FunctionDef name:_reorder_columns arg:self arg:res arg:data arguments arg arg arg Assign Compare Compare Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_variational_recurrent_dropout_value", + "source_code": "def _variational_recurrent_dropout_value(self, unused_index, value, noise, keep_prob):\n random_tensor = keep_prob + noise\n binary_tensor = math_ops.floor(random_tensor)\n ret = math_ops.divide(value, keep_prob) * binary_tensor\n ret.set_shape(value.get_shape())\n return ret", + "docstring": "Performs dropout given the pre-calculated noise tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py", + "ast_data": "FunctionDef name:_variational_recurrent_dropout_value arg:self arg:unused_index arg:value arg:noise arg:keep_prob arguments arg arg arg arg arg Assign Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "OnnxExporterWarning", + "source_code": "class OnnxExporterWarning(UserWarning):\n pass", + "docstring": "Warnings in the ONNX exporter.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\errors.py", + "ast_data": "ClassDef name:OnnxExporterWarning" + }, + { + "library": "sphinx", + "name": "get_inventory_and_name_suffix", + "source_code": "def get_inventory_and_name_suffix(self, name: str) -> tuple[str | None, str]:\n assert name.startswith('external'), name\n suffix = name[9:]\n if name[8] == '+':\n inv_name, suffix = suffix.split(':', 1)\n return (inv_name, suffix)\n elif name[8] == ':':\n return (None, suffix)\n else:\n msg = f'Malformed :external: role name: {name}'\n raise ValueError(msg)", + "docstring": "Extract an inventory name (if any) and `` -- any inventory, explicit domain and name.", + "type": "method", + "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py", + "ast_data": "FunctionDef name:get_inventory_and_name_suffix arg:self arg:name arguments arg arg Call Assign If Compare Assign Call Return return:yes If Compare Return return:yes Assign Raise Call" + }, + { + "library": "pandas", + "name": "is_interval_dtype", + "source_code": "def is_interval_dtype(arr_or_dtype) -> bool:\n warnings.warn('is_interval_dtype is deprecated and will be removed in a future version. 
Use `isinstance(dtype, pd.IntervalDtype)` instead', DeprecationWarning, stacklevel=2)\n if isinstance(arr_or_dtype, ExtensionDtype):\n return arr_or_dtype.type is Interval\n if arr_or_dtype is None:\n return False\n return IntervalDtype.is_dtype(arr_or_dtype)", + "docstring": "Check whether an array-like or dtype is of the Interval dtype. .. deprecated:: 2.2.0 Use isinstance(dtype, pd.IntervalDtype) instead. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Interval dtype. See Also -------- api.types.is_object_dtype : Check whether an array-like or dtype is of the object dtype. api.types.is_numeric_dtype : Check whether the provided array or dtype is of a numeric dtype. api.types.is_categorical_dtype : Check whether an array-like or dtype is of the Categorical dtype. Examples -------- >>> from pandas.core.dtypes.common import is_interval_dtype >>> is_interval_dtype(object) False >>> is_interval_dtype(pd.IntervalDtype()) True >>> is_interval_dtype([1, 2, 3]) False >>> >>> interval = pd.Interval(1, 2, closed=\"right\") >>> is_interval_dtype(interval) False >>> is_interval_dtype(pd.IntervalIndex([interval])) True", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\common.py", + "ast_data": "FunctionDef name:is_interval_dtype arg:arr_or_dtype arguments arg Call If Call Return return:yes Compare If Compare Return return:yes Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "alpha_max", + "source_code": "def alpha_max(emp_cov):\n A = np.copy(emp_cov)\n A.flat[::A.shape[0] + 1] = 0\n return np.max(np.abs(A))", + "docstring": "Find the maximum alpha for which there are some non-zeros off-diagonal. Parameters ---------- emp_cov : ndarray of shape (n_features, n_features) The sample covariance matrix. Notes ----- This results from the bound for the all the Lasso that are solved in GraphicalLasso: each time, the row of cov corresponds to Xy. As the bound for alpha is given by , the result follows.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\covariance\\_graph_lasso.py", + "ast_data": "FunctionDef name:alpha_max arg:emp_cov arguments arg Assign Call Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "random_crop", + "source_code": "@tf_export('image.random_crop', v1=['image.random_crop', 'random_crop'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('random_crop')\ndef random_crop(value, size, seed=None, name=None):\n with ops.name_scope(name, 'random_crop', [value, size]) as name:\n value = ops.convert_to_tensor(value, name='value')\n size = ops.convert_to_tensor(size, dtype=dtypes.int32, name='size')\n shape = array_ops.shape(value)\n check = control_flow_assert.Assert(math_ops.reduce_all(shape >= size), ['Need value.shape >= size, got ', shape, size], summarize=1000)\n shape = control_flow_ops.with_dependencies([check], shape)\n limit = shape - size + 1\n offset = random_ops.random_uniform(array_ops.shape(shape), dtype=size.dtype, maxval=size.dtype.max, seed=seed) % limit\n return array_ops.slice(value, offset, size, name=name)", + "docstring": "Randomly crops a tensor to a given size. Slices a shape portion out of at a uniformly chosen offset. Requires . If a dimension should not be cropped, pass the full size of that dimension. For example, RGB images can be cropped with . 
Example usage: >>> image = [[1, 2, 3], [4, 5, 6]] >>> result = tf.image.random_crop(value=image, size=(1, 3)) >>> result.shape.as_list() [1, 3] For producing deterministic results given a value, use . Unlike using the param with ops, ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: value: Input tensor to crop. size: 1-D tensor with size the rank of . seed: Python integer. Used to create a random seed. See for behavior. name: A name for this operation (optional). Returns: A cropped tensor of the same rank as and shape .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_crop_ops.py", + "ast_data": "FunctionDef name:random_crop arg:value arg:size arg:seed arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Call Compare Assign Call Assign Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "cherrypy", + "name": "resume", + "source_code": "@cherrypy.expose\ndef resume(self, namespace):\n logging.statistics.get(namespace, {})['Enabled'] = True\n raise cherrypy.HTTPRedirect('./')", + "docstring": "Resume gathering the statistics.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py", + "ast_data": "FunctionDef name:resume arg:self arg:namespace arguments arg arg Assign Call Raise Call" + }, + { + "library": "pytorch", + "name": "remove_guards_with_source", + "source_code": "def remove_guards_with_source(self, source):\n from ._dynamo.source import is_from_source\n self.inner = {g for g in self.inner if not is_from_source(g.originating_source, source)}", + "docstring": "Delete all guards that contains a given source", + "type": "method", + "file_path": "pytorch\\torch\\_guards.py", + "ast_data": "FunctionDef name:remove_guards_with_source arg:self arg:source arguments arg arg Assign Call" + }, + { + "library": "tensorflow", + "name": "_unary_op", + "source_code": "def _unary_op(fn):\n\n def unary_op_wrapper(x, name=None):\n return fn(x, name=name)\n return unary_op_wrapper", + "docstring": "Wrapper that restricts to have the correct signature.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py", + "ast_data": "FunctionDef name:_unary_op arg:fn arguments arg FunctionDef name:unary_op_wrapper arg:x arg:name arguments arg arg Return return:yes Call Return return:yes" + }, + { + "library": "pandas", + "name": "_escape_latex", + "source_code": "def _escape_latex(s: str) -> str:\n return s.replace('\\\\', 'ab2§=§8yz').replace('ab2§=§8yz ', 'ab2§=§8yz\\\\space ').replace('&', '\\\\&').replace('%', '\\\\%').replace('$', '\\\\$').replace('", + "docstring": "Replace the characters `` in the string with LaTeX-safe sequences. Use this if you need to display text that might contain such characters in LaTeX. 
Parameters ---------- s : str Input to be escaped Return ------ str : Escaped string", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_escape_latex arg:s arguments arg Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_get_paddings_constant", + "source_code": "def _get_paddings_constant(paddings):\n if isinstance(paddings, tensor_lib.Tensor):\n return tensor_util.constant_value(paddings, partial=True)\n elif isinstance(paddings, (list, tuple)):\n return [_get_paddings_constant(x) for x in paddings]\n else:\n return paddings", + "docstring": "Helper to get the constant values of the paddings arg to pad(). Used under V1 graph mode to facilitate computation of the shape of the output tensor of . Args: paddings: The same paddings arg as passed to pad(). Can be a Tensor, or a nested list or tuple of Tensor and/or numbers. Returns: A nested list or numbers or , in which indicates unknown padding size.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:_get_paddings_constant arg:paddings arguments arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "import_module", + "source_code": "def import_module(self, name: str, package=None):\n name = self._mangler.demangle(name)\n return self._gcd_import(name)", + "docstring": "Load a module from the package if it hasn't already been loaded, and then return the module. Modules are loaded locally to the importer and will appear in ``. Returns: types.ModuleType: The (possibly already) loaded module.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_importer.py", + "ast_data": "FunctionDef name:import_module arg:self arg:name arg:package arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_restore_updates", + "source_code": "def _restore_updates(self):\n data_dict = {}\n for name, var in self.state_variables.items():\n data_dict[name] = var.numpy()\n return data_dict", + "docstring": "Recreates a dict of updates from the layer's weights.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py", + "ast_data": "FunctionDef name:_restore_updates arg:self arguments arg Assign For Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_signatures_from_saved_model", + "source_code": "def get_signatures_from_saved_model(saved_model_path: str, signature_keys: Optional[Sequence[str]]=None, tags: Optional[Collection[str]]=None) -> Dict[str, meta_graph_pb2.SignatureDef]:\n if tags is None:\n tags = {tag_constants.SERVING}\n loader = saved_model_loader.SavedModelLoader(saved_model_path)\n meta_graphdef = loader.get_meta_graph_def_from_tags(tags)\n signatures = {}\n for key, signature_def in meta_graphdef.signature_def.items():\n if key == saved_model_constants.INIT_OP_SIGNATURE_KEY:\n continue\n if signature_keys is not None and key not in signature_keys:\n continue\n signatures[key] = signature_def\n return signatures", + "docstring": "Gets a map from signature keys to their SignatureDef. Args: saved_model_path: Path to the saved model. signature_keys: List of keys identifying SignatureDef to retrieve. If None, retrieve all except the init signature. 
tags: Set of tags identifying the MetaGraphDef within the SavedModel. Returns: A map from signature_key to its SignatureDef.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\save_model.py", + "ast_data": "FunctionDef name:get_signatures_from_saved_model arg:saved_model_path arg:signature_keys arg:tags arguments arg arg arg If Compare Assign Assign Call Assign Call Assign For Call If Compare If BoolOp Compare Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_run_key", + "source_code": "def get_run_key(feed_dict, fetches):\n return json.dumps(RunKey(get_flattened_names(feed_dict), get_flattened_names(fetches)))", + "docstring": "Summarize the names of feeds and fetches as a RunKey JSON string. Args: feed_dict: The feed_dict given to the call. fetches: The fetches from the call. Returns: A JSON Array consisting of two items. They first items is a flattened Array of the names of the feeds. The second item is a flattened Array of the names of the fetches.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\common.py", + "ast_data": "FunctionDef name:get_run_key arg:feed_dict arg:fetches arguments arg arg Return return:yes Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "_unmultiplied_rgba8888_to_premultiplied_argb32", + "source_code": "def _unmultiplied_rgba8888_to_premultiplied_argb32(rgba8888):\n if sys.byteorder == 'little':\n argb32 = np.take(rgba8888, [2, 1, 0, 3], axis=2)\n rgb24 = argb32[..., :-1]\n alpha8 = argb32[..., -1:]\n else:\n argb32 = np.take(rgba8888, [3, 0, 1, 2], axis=2)\n alpha8 = argb32[..., :1]\n rgb24 = argb32[..., 1:]\n if alpha8.min() != 255:\n np.multiply(rgb24, alpha8 / 255, out=rgb24, casting='unsafe')\n return argb32", + "docstring": "Convert an unmultiplied RGBA8888 buffer to a premultiplied ARGB32 buffer.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_unmultiplied_rgba8888_to_premultiplied_argb32 arg:rgba8888 arguments arg If Compare Assign Call Assign Assign Assign Call Assign Assign If Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "format", + "source_code": "def format(self):\n return traceback.format_list(self.summary())", + "docstring": "Formats a single torch._C._profiler.CapturedTraceback into a list of strings equivalent to the output of traceback.format_list. Note that if pass it CapturedTraceback with C++ traces, it is better not to use this function and use the batch formatting API format_captured_tbs to amortize the cost of symbolization", + "type": "method", + "file_path": "pytorch\\torch\\utils\\_traceback.py", + "ast_data": "FunctionDef name:format arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_ensure_in_unit_hypercube", + "source_code": "def _ensure_in_unit_hypercube(sample: 'npt.ArrayLike') -> np.ndarray:\n sample = np.asarray(sample, dtype=np.float64, order='C')\n if not sample.ndim == 2:\n raise ValueError('Sample is not a 2D array')\n if sample.max() > 1.0 or sample.min() < 0.0:\n raise ValueError('Sample is not in unit hypercube')\n return sample", + "docstring": "Ensure that sample is a 2D array and is within a unit hypercube Parameters ---------- sample : array_like (n, d) A 2D array of points. 
Returns ------- np.ndarray The array interpretation of the input sample Raises ------ ValueError If the input is not a 2D array or contains points outside of a unit hypercube.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:_ensure_in_unit_hypercube arg:sample arguments arg Assign Call If Compare Raise Call If BoolOp Compare Call Compare Call Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "__class_getitem__", + "source_code": "@classmethod\ndef __class_getitem__(cls, arg, /):\n from types import GenericAlias\n return GenericAlias(cls, arg)", + "docstring": "Return a parametrized wrapper around the type. .. versionadded:: 1.16.0 Returns ------- alias : types.GenericAlias A parametrized type. Examples -------- >>> import numpy as np >>> from scipy.sparse import coo_matrix >>> coo_matrix[np.int8] scipy.sparse._coo.coo_matrix[numpy.int8]", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_matrix.py", + "ast_data": "FunctionDef name:__class_getitem__ arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "Leon", + "source_code": "class Leon(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-1.2] * self.N, [1.2] * self.N))\n self.global_optimum = [[1 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return 100.0 * (x[1] - x[0] ** 2.0) ** 2.0 + (1 - x[0]) ** 2.0", + "docstring": "Leon objective function. This class defines the Leon [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Leon}}(\\mathbf{x}) = \\left(1 - x_{1}\\right)^{2} + 100 \\left(x_{2} - x_{1}^{2} \\right)^{2} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_L.py", + "ast_data": "ClassDef name:Leon FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "join", + "source_code": "def join(self):\n c_api.TF_ServerJoin(self._server)", + "docstring": "Blocks until the server has shut down. This method currently blocks forever. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while joining the TensorFlow server.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py", + "ast_data": "FunctionDef name:join arg:self arguments arg Call" + }, + { + "library": "kornia", + "name": "to_jax", + "source_code": "def to_jax() -> ModuleType:\n return ivy.transpile(kornia, source='torch', target='jax')", + "docstring": "Convert Kornia to JAX. Transpiles the Kornia library to JAX using [ivy]( The transpilation process occurs lazily, so the transpilation on a given kornia function/class will only occur when it's called or instantiated for the first time. This will make any functions/classes slow when being used for the first time, but any subsequent uses should be as fast as expected. Return: The Kornia library transpiled to JAX Example: .. highlight:: python .. 
code-block:: python import kornia jax_kornia = kornia.to_jax() import jax input = jax.random.normal(jax.random.key(42), shape=(2, 3, 4, 5)) gray = jax_kornia.color.gray.rgb_to_grayscale(input)", + "type": "function", + "file_path": "kornia\\kornia\\transpiler\\transpiler.py", + "ast_data": "FunctionDef name:to_jax arguments Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "attributes", + "source_code": "@property\ndef attributes(self) -> Any:\n attrs = self.definition.attr\n attrs.pop(attributes_lib.EAGER_RUNTIME_CONSTRUCTION_CONTEXT, None)\n return attrs", + "docstring": "Returns FunctionDef attributes in the Runtime.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py", + "ast_data": "FunctionDef name:attributes arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "next_sample", + "source_code": "def next_sample(uid):\n return next(_SHARED_SEQUENCES[uid])", + "docstring": "Gets the next value from the generator . To allow multiple generators to be used at the same time, we use to get a specific one. A single generator would cause the validation to overwrite the training generator. Args: uid: int, generator identifier Returns: The next value of generator .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", + "ast_data": "FunctionDef name:next_sample arg:uid arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "select_decomp_table", + "source_code": "def select_decomp_table() -> dict[Any, Callable[..., Any]]:\n if config.fallback_random:\n return decompositions\n return fast_random_decomps()", + "docstring": "decomps can change based on config", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\decomposition.py", + "ast_data": "FunctionDef name:select_decomp_table arguments If Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "EnumSessions", + "source_code": "def EnumSessions(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", + "docstring": "Enumerate existing sessions and return available profile tools.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\profiler_analysis_pb2_grpc.py", + "ast_data": "FunctionDef name:EnumSessions arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_NamedTupleCodec", + "source_code": "class _NamedTupleCodec:\n\n def can_encode(self, pyobj):\n return _is_named_tuple(pyobj)\n\n def do_encode(self, named_tuple_value, encode_fn):\n encoded_named_tuple = struct_pb2.StructuredValue()\n encoded_named_tuple.named_tuple_value.CopyFrom(struct_pb2.NamedTupleValue())\n encoded_named_tuple.named_tuple_value.name = named_tuple_value.__class__.__name__\n for key in named_tuple_value._fields:\n pair = encoded_named_tuple.named_tuple_value.values.add()\n pair.key = key\n pair.value.CopyFrom(encode_fn(named_tuple_value._asdict()[key]))\n return encoded_named_tuple\n\n def can_decode(self, value):\n return value.HasField('named_tuple_value')\n\n def do_decode(self, value, decode_fn):\n key_value_pairs = value.named_tuple_value.values\n items = [(pair.key, decode_fn(pair.value)) for pair in key_value_pairs]\n named_tuple_type = collections.namedtuple(value.named_tuple_value.name, 
[item[0] for item in items])\n return named_tuple_type(**dict(items))", + "docstring": "Codec for namedtuples. Encoding and decoding a namedtuple reconstructs a namedtuple with a different actual Python type, but with the same and .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py", + "ast_data": "ClassDef name:_NamedTupleCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:named_tuple_value arg:encode_fn arguments arg arg arg Assign Call Call Call Assign For Assign Call Assign Call Call Call Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Assign Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "pop", + "source_code": "def pop(self, key: str) -> Any:\n v = self[key]\n del self[key]\n return v", + "docstring": "Remove key from the ParameterDict and return its parameter. Args: key (str): key to pop from the ParameterDict", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\container.py", + "ast_data": "FunctionDef name:pop arg:self arg:key arguments arg arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "node_name_from_input", + "source_code": "def node_name_from_input(node_name: str) -> str:\n if node_name.startswith('^'):\n node_name = node_name[1:]\n m = re.search('(.*):\\\\d+$', node_name)\n if m:\n node_name = m.group(1)\n return node_name", + "docstring": "Strips off ports and other decorations to get the underlying node name.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py", + "ast_data": "FunctionDef name:node_name_from_input arg:node_name arguments arg If Call Assign Assign Call If Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get", + "source_code": "def get(self, timeout=None, tag=None):\n with self._queue_lock:\n while self._should_process_closures and self._queue.empty() and (tag is None or self._tagged_queue[tag].empty()):\n if not self._closures_queued_condition.wait(timeout=timeout):\n return None\n if not self._should_process_closures:\n return None\n if tag is not None and (not self._tagged_queue[tag].empty()):\n closure = self._tagged_queue[tag].get(block=False)\n return closure\n closure = self._queue.get(block=False)\n metric_utils.monitor_int('queued_closures', self._queue.qsize())\n assert closure.tag is None\n assert tag is None or self._tagged_queue[tag].empty()\n self._queue_free_slot_condition.notify()\n self.inflight_closure_count += 1\n return closure", + "docstring": "Return a closure from the queue to be executed. It will try to fetch an item from the queue with the given tag. If this queue is empty, it will then check the global queue. Args: timeout: timeout when waiting for a closure to be put. tag: optional tag to specify which queue to query first before querying the global queue. 
Returns: a closure or None after timeout.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:get arg:self arg:timeout arg:tag arguments arg arg arg With While BoolOp Call BoolOp Compare Call If Call Return return:no If Return return:no If BoolOp Compare Call Assign Call Return return:yes Assign Call Call Call Compare BoolOp Compare Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "asarray", + "source_code": "def asarray(obj: Array | bool | int | float | complex | NestedSequence[bool | int | float | complex] | SupportsBufferProtocol, /, *, dtype: Optional[DType]=None, device: Optional[Device]=None, copy: Optional[bool]=None, **kwargs) -> Array:\n with cp.cuda.Device(device):\n if copy is None:\n return cp.asarray(obj, dtype=dtype, **kwargs)\n else:\n res = cp.array(obj, dtype=dtype, copy=copy, **kwargs)\n if not copy and res is not obj:\n raise ValueError('Unable to avoid copy while creating an array as requested')\n return res", + "docstring": "Array API compatibility wrapper for asarray(). See the corresponding documentation in the array library and/or the array API specification for more details.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_aliases.py", + "ast_data": "FunctionDef name:asarray arguments arg arg arg arg arg With Call If Compare Return return:yes Call Assign Call If BoolOp Compare Raise Call Return return:yes" + }, + { + "library": "django", + "name": "select_for_update", + "source_code": "def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):\n if nowait and skip_locked:\n raise ValueError('The nowait option cannot be used with skip_locked.')\n obj = self._chain()\n obj._for_write = True\n obj.query.select_for_update = True\n obj.query.select_for_update_nowait = nowait\n obj.query.select_for_update_skip_locked = skip_locked\n obj.query.select_for_update_of = of\n obj.query.select_for_no_key_update = no_key\n return obj", + "docstring": "Return a new QuerySet instance that will select objects with a FOR UPDATE lock.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:select_for_update arg:self arg:nowait arg:skip_locked arg:of arg:no_key arguments arg arg arg arg arg If BoolOp Raise Call Assign Call Assign Assign Assign Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "SplitPoint", + "source_code": "class SplitPoint(Enum):\n BEGINNING = 1\n END = 2", + "docstring": "Enum representing the points at which a split can occur in the execution of a submodule. Attributes: BEGINNING: Represents adding a split point *before* the execution of a certain submodule in the function. 
END: Represents adding a split point *after* the execution of a certain submodule in the function.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py", + "ast_data": "ClassDef name:SplitPoint Assign Assign" + }, + { + "library": "pandas", + "name": "month_name", + "source_code": "def month_name(self, locale=None) -> npt.NDArray[np.object_]:\n values = self._local_timestamps()\n result = fields.get_date_name_field(values, 'month_name', locale=locale, reso=self._creso)\n result = self._maybe_mask_results(result, fill_value=None)\n if using_string_dtype():\n from pandas import StringDtype, array as pd_array\n return pd_array(result, dtype=StringDtype(na_value=np.nan))\n return result", + "docstring": "Return the month names with specified locale. Parameters ---------- locale : str, optional Locale determining the language in which to return the month name. Default is English locale (`` will return month names in Brazilian Portuguese language. >>> idx = pd.date_range(start=\"2018-01\", freq=\"ME\", periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='ME') >>> idx.month_name(locale=\"pt_BR.utf8\") # doctest: +SKIP Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object')", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py", + "ast_data": "FunctionDef name:month_name arg:self arg:locale arguments arg arg Assign Call Assign Call Assign Call If Call Return return:yes Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "squared_distance", + "source_code": "def squared_distance(self, point: Tensor) -> Tensor:\n diff: Tensor = point - self.origin\n return squared_norm(diff - self.direction @ diff * self.direction)", + "docstring": "Return the squared distance of a point to its projection onte the line. 
Args: point: the point to calculate the distance onto the line.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\line.py", + "ast_data": "FunctionDef name:squared_distance arg:self arg:point arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "get_context_data", + "source_code": "def get_context_data(self, **kwargs):\n if 'form' not in kwargs:\n kwargs['form'] = self.get_form()\n return super().get_context_data(**kwargs)", + "docstring": "Insert the form into the context dict.", + "type": "method", + "file_path": "django\\django\\views\\generic\\edit.py", + "ast_data": "FunctionDef name:get_context_data arg:self arguments arg arg If Compare Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_get_codes_for_sorting", + "source_code": "def _get_codes_for_sorting(self) -> list[Categorical]:\n\n def cats(level_codes: np.ndarray) -> np.ndarray:\n return np.arange(level_codes.max() + 1 if len(level_codes) else 0, dtype=level_codes.dtype)\n return [Categorical.from_codes(level_codes, cats(level_codes), True, validate=False) for level_codes in self.codes]", + "docstring": "we are categorizing our codes by using the available categories (all, not just observed) excluding any missing ones (-1); this is in preparation for sorting, where we need to disambiguate that -1 is not a valid valid", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:_get_codes_for_sorting arg:self arguments arg FunctionDef name:cats arg:level_codes arguments arg Return return:yes Call Call Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "combine_first", + "source_code": "def combine_first(self, other: DataFrame) -> DataFrame:\n from pandas.core.computation import expressions\n\n def combiner(x: Series, y: Series):\n mask = x.isna()._values\n x_values = x._values\n y_values = y._values\n if y.name not in self.columns:\n return y_values\n return expressions.where(mask, y_values, x_values)\n if len(other) == 0:\n combined = self.reindex(self.columns.append(other.columns.difference(self.columns)), axis=1)\n combined = combined.astype(other.dtypes)\n else:\n combined = self.combine(other, combiner, overwrite=False)\n dtypes = {col: find_common_type([self.dtypes[col], other.dtypes[col]]) for col in self.columns.intersection(other.columns) if combined.dtypes[col] != self.dtypes[col]}\n if dtypes:\n combined = combined.astype(dtypes)\n return combined.__finalize__(self, method='combine_first')", + "docstring": "Update null elements with value in the same location in . Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. The resulting dataframe contains the 'first' dataframe values and overrides the second one values where both first.loc[index, col] and second.loc[index, col] are not missing values, upon calling first.combine_first(second). Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame The result of combining the provided DataFrame with the other object. See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. 
Examples -------- >>> df1 = pd.DataFrame({\"A\": [None, 0], \"B\": [None, 4]}) >>> df2 = pd.DataFrame({\"A\": [1, 1], \"B\": [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in >>> df1 = pd.DataFrame({\"A\": [None, 0], \"B\": [4, None]}) >>> df2 = pd.DataFrame({\"B\": [3, 3], \"C\": [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:combine_first arg:self arg:other arguments arg arg FunctionDef name:combiner arg:x arg:y arguments arg arg Assign Call Assign Assign If Compare Return return:yes Return return:yes Call If Compare Call Assign Call Call Call Assign Call Assign Call Assign Call Call Compare If Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "predict_generator", + "source_code": "def predict_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0):\n warnings.warn('`Model.predict_generator` is deprecated and will be removed in a future version. Please use `Model.predict`, which supports generators.')\n return self.predict(generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose, callbacks=callbacks)", + "docstring": "Generates predictions for the input samples from a data generator. DEPRECATED: now supports generators, so there is no longer any need to use this endpoint.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:predict_generator arg:self arg:generator arg:steps arg:callbacks arg:max_queue_size arg:workers arg:use_multiprocessing arg:verbose arguments arg arg arg arg arg arg arg arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_SymNodeDict", + "source_code": "class _SymNodeDict:\n\n def __init__(self) -> None:\n self.sym_node_dict: dict[PySymType, _PySymProxyType] = {}\n\n def __setitem__(self, key: PySymType, value: _PySymProxyType) -> None:\n self.sym_node_dict[key.node] = value\n\n def __getitem__(self, key: PySymType) -> _PySymProxyType:\n return self.sym_node_dict[key.node]\n\n def __contains__(self, key: PySymType) -> bool:\n return key.node in self.sym_node_dict\n\n def get(self, key: PySymType, default: Optional[_PySymProxyType]=None) -> _PySymProxyType:\n return self.sym_node_dict.get(key.node, default)\n\n def __iter__(self) -> Any:\n raise NotImplementedError\n\n def __len__(self) -> int:\n return len(self.sym_node_dict)", + "docstring": "Wrapper around a dictionary that will hash SymInts with their nodes", + "type": "class", + "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py", + "ast_data": "ClassDef name:_SymNodeDict FunctionDef name:__init__ arg:self arguments arg FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Compare FunctionDef name:get arg:self arg:key arg:default arguments arg arg arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Raise FunctionDef name:__len__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "one_host_numpy_dataset", + "source_code": "def 
one_host_numpy_dataset(numpy_input, colocate_with, session):\n\n def create_colocated_variable(next_creator, **kwargs):\n kwargs['colocate_with'] = colocate_with\n return next_creator(**kwargs)\n numpy_flat = nest.flatten(numpy_input)\n with variable_scope.variable_creator_scope(create_colocated_variable):\n vars_flat = tuple((variable_v1.VariableV1(array_ops.zeros(i.shape, i.dtype), trainable=False) for i in numpy_flat))\n for v, i in zip(vars_flat, numpy_flat):\n init_var_from_numpy(v, i, session)\n vars_nested = nest.pack_sequence_as(numpy_input, vars_flat)\n return dataset_ops.Dataset.from_tensor_slices(vars_nested)", + "docstring": "Create a dataset on from .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\numpy_dataset.py", + "ast_data": "FunctionDef name:one_host_numpy_dataset arg:numpy_input arg:colocate_with arg:session arguments arg arg arg FunctionDef name:create_colocated_variable arg:next_creator arguments arg arg Assign Return return:yes Call Assign Call With Call Assign Call Call Call For Call Call Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "legval2d", + "source_code": "def legval2d(x, y, c):\n return pu._valnd(legval, c, x, y)", + "docstring": "Evaluate a 2-D Legendre series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) The parameters and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either and or their elements must support multiplication and addition both with themselves and with the elements of . If is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `xyxycxy`. See Also -------- legval, leggrid2d, legval3d, leggrid3d", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:legval2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "camtoworld_to_worldtocam_Rt", + "source_code": "def camtoworld_to_worldtocam_Rt(R: Tensor, t: Tensor) -> tuple[Tensor, Tensor]:\n KORNIA_CHECK_SHAPE(R, ['B', '3', '3'])\n KORNIA_CHECK_SHAPE(t, ['B', '3', '1'])\n R_inv = R.transpose(1, 2)\n new_t: Tensor = -R_inv @ t\n return (R_inv, new_t)", + "docstring": "Convert camtoworld to worldtocam frame used in Colmap. See long-url: Args: R: Rotation matrix, :math: t: Translation matrix :math:. Returns: Rinv: Rotation matrix, :math: tinv: Translation matrix :math:. 
Example: >>> R, t = torch.eye(3)[None], torch.ones(3).reshape(1, 3, 1) >>> camtoworld_to_worldtocam_Rt(R, t) (tensor([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]]), tensor([[[-1.], [-1.], [-1.]]]))", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:camtoworld_to_worldtocam_Rt arg:R arg:t arguments arg arg Call Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "sem", + "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='sem')\ndef sem(self, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | Any:\n result = super().sem(axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs)\n if isinstance(result, Series):\n result = result.__finalize__(self, method='sem')\n return result", + "docstring": "Return unbiased standard error of the mean over requested axis. Normalized by N-1 by default. This can be changed using the ddof argument Parameters ---------- axis : {index (0), columns (1)} For this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.sem with `numeric_onlyTrue` to avoid getting an error. >>> df = pd.DataFrame({\"a\": [1, 2], \"b\": [\"T\", \"Z\"]}, index=[\"tiger\", \"zebra\"]) >>> df.sem(numeric_only=True) a 0.5 dtype: float64", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:sem arg:self arg:axis arg:skipna arg:ddof arg:numeric_only arguments arg arg arg arg arg arg Assign Call Call If Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "retrieve_bazel_version", + "source_code": "def retrieve_bazel_version():\n bazel_executable = shutil.which('bazel')\n if bazel_executable is None:\n bazel_executable = shutil.which('bazelisk')\n if bazel_executable is None:\n print('Cannot find bazel. Please install bazel/bazelisk.')\n sys.exit(1)\n stderr = open(os.devnull, 'wb')\n curr_version = run_shell([bazel_executable, '--version'], allow_non_zero=True, stderr=stderr)\n if curr_version.startswith('bazel '):\n curr_version = curr_version.split('bazel ')[1]\n curr_version_int = convert_version_to_int(curr_version)\n if not curr_version_int:\n print('WARNING: current bazel installation is not a release version.')\n return curr_version\n print('You have bazel %s installed.' % curr_version)\n return curr_version", + "docstring": "Retrieve installed bazel version (or bazelisk). 
Returns: The bazel version detected.", + "type": "function", + "file_path": "tensorflow\\configure.py", + "ast_data": "FunctionDef name:retrieve_bazel_version arguments Assign Call If Compare Assign Call If Compare Call Call Assign Call Assign Call If Call Assign Call Assign Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "max_over_ndim", + "source_code": "def max_over_ndim(input, axis_list, keepdim=False):\n axis_list.sort(reverse=True)\n for axis in axis_list:\n input, _ = input.max(axis, keepdim)\n return input", + "docstring": "Apply 'torch.max' over the given axes.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py", + "ast_data": "FunctionDef name:max_over_ndim arg:input arg:axis_list arg:keepdim arguments arg arg arg Call For Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "getfullargspec_no_self", + "source_code": "def getfullargspec_no_self(func):\n sig = inspect.signature(func)\n args = [p.name for p in sig.parameters.values() if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]\n varargs = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL]\n varargs = varargs[0] if varargs else None\n varkw = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD]\n varkw = varkw[0] if varkw else None\n defaults = tuple((p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty)) or None\n kwonlyargs = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.KEYWORD_ONLY]\n kwdefaults = {p.name: p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.KEYWORD_ONLY and p.default is not p.empty}\n annotations = {p.name: p.annotation for p in sig.parameters.values() if p.annotation is not p.empty}\n return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwdefaults or None, annotations)", + "docstring": "inspect.getfullargspec replacement using inspect.signature. If func is a bound method, do not list the 'self' parameter. Parameters ---------- func : callable A callable to inspect Returns ------- fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations) NOTE: if the first argument of is self, it is *not*, I repeat *not*, included in fullargspec.args. This is done for consistency between inspect.getargspec() under Python 2.x, and inspect.signature() under Python 3.x.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_util.py", + "ast_data": "FunctionDef name:getfullargspec_no_self arg:func arguments arg Assign Call Assign Call Compare Assign Call Compare Assign Assign Call Compare Assign Assign BoolOp Call Call BoolOp Compare Compare Assign Call Compare Assign Call BoolOp Compare Compare Assign Call Compare Return return:yes Call BoolOp" + }, + { + "library": "pytorch", + "name": "_lazy_load_hook", + "source_code": "def _lazy_load_hook(self: _LazyProtocol, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n for name, param in itertools.chain(self._parameters.items(), self._buffers.items()):\n key = prefix + name\n if key in state_dict and param is not None:\n input_param = state_dict[key]\n if is_lazy(param):\n if not is_lazy(input_param):\n with torch.no_grad():\n param.materialize(input_param.shape)", + "docstring": "load_state_dict pre-hook function for lazy buffers and parameters. 
The purpose of this hook is to adjust the current state and/or `` for the details of the hook specification.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\lazy.py", + "ast_data": "FunctionDef name:_lazy_load_hook arg:self arg:state_dict arg:prefix arg:local_metadata arg:strict arg:missing_keys arg:unexpected_keys arg:error_msgs arguments arg arg arg arg arg arg arg arg For Call Call Call Assign If BoolOp Compare Compare Assign If Call If Call With Call Call" + }, + { + "library": "tensorflow", + "name": "_check_stop", + "source_code": "def _check_stop(self):\n return False", + "docstring": "Hook for subclasses to provide their own stop condition. Returns: True if the session should stop, False otherwise.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:_check_stop arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "argmin", + "source_code": "def argmin(self, axis=None, out=None, *, explicit=False):\n return self._argminmax(axis, out, np.argmin, np.less, explicit)", + "docstring": "Return indices of minimum elements along an axis. By default, implicit zero elements are taken into account. If there are several minimum values, the index of the first occurrence is returned. If is set, only explicitly stored elements will be considered. Parameters ---------- axis : {-2, -1, 0, 1, None}, optional Axis along which the argmin is computed. If None (default), index of the minimum element in the flatten data is returned. out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. explicit : {False, True} optional (default: False) When set to True, only explicitly stored elements will be considered. If axis is not None and an axis has no stored elements, argmin is undefined, so the index `axis` is 1.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_data.py", + "ast_data": "FunctionDef name:argmin arg:self arg:axis arg:out arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_cycle_colors", + "source_code": "def _cycle_colors(colors: list[Color], num_colors: int) -> Iterator[Color]:\n max_colors = max(num_colors, len(colors))\n yield from itertools.islice(itertools.cycle(colors), max_colors)", + "docstring": "Cycle colors until achieving max of or length of . 
Extra colors will be ignored by matplotlib if there are more colors than needed and nothing needs to be done here.", + "type": "function", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py", + "ast_data": "FunctionDef name:_cycle_colors arg:colors arg:num_colors arguments arg arg Assign Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "process_routing", + "source_code": "def process_routing(_obj, _method, /, **kwargs):\n if not kwargs:\n\n class EmptyRequest:\n\n def get(self, name, default=None):\n return Bunch(**{method: dict() for method in METHODS})\n\n def __getitem__(self, name):\n return Bunch(**{method: dict() for method in METHODS})\n\n def __getattr__(self, name):\n return Bunch(**{method: dict() for method in METHODS})\n return EmptyRequest()\n if not (hasattr(_obj, 'get_metadata_routing') or isinstance(_obj, MetadataRouter)):\n raise AttributeError(f'The given object ({_obj.__class__.__name__!r}) needs to either implement the routing method `get_metadata_routing` or be a `MetadataRouter` instance.')\n if _method not in METHODS:\n raise TypeError(f'Can only route and process input on these methods: {METHODS}, while the passed method is: {_method}.')\n request_routing = get_routing_for_object(_obj)\n request_routing.validate_metadata(params=kwargs, method=_method)\n routed_params = request_routing.route_params(params=kwargs, caller=_method)\n return routed_params", + "docstring": "Validate and route input parameters. This function is used inside a router's method, e.g. :term:, to validate the metadata and handle the routing. Assuming this signature of a router's fit method: `~utils.Bunch~sklearn.utils.Bunchobj.get_metadata_routing()`.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", + "ast_data": "FunctionDef name:process_routing arguments arg arg arg If ClassDef name:EmptyRequest FunctionDef name:get arg:self arg:name arg:default arguments arg arg arg Return return:yes Call Call FunctionDef name:__getitem__ arg:self arg:name arguments arg arg Return return:yes Call Call FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Return return:yes Call Call Return return:yes Call If BoolOp Call Call Raise Call If Compare Raise Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_update", + "source_code": "def _update(self, event):\n if self.ignore(event) or event.button != 1:\n return\n if event.name == 'button_press_event' and self.ax.contains(event)[0]:\n self.drag_active = True\n event.canvas.grab_mouse(self.ax)\n if not self.drag_active:\n return\n if event.name == 'button_release_event' or (event.name == 'button_press_event' and (not self.ax.contains(event)[0])):\n self.drag_active = False\n event.canvas.release_mouse(self.ax)\n self._active_handle = None\n return\n xdata, ydata = self._get_data_coords(event)\n handle_index = np.argmin(np.abs([h.get_xdata()[0] - xdata for h in self._handles] if self.orientation == 'horizontal' else [h.get_ydata()[0] - ydata for h in self._handles]))\n handle = self._handles[handle_index]\n if handle is not self._active_handle:\n self._active_handle = handle\n self._update_val_from_pos(xdata if self.orientation == 'horizontal' else ydata)", + "docstring": "Update the slider position.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:_update arg:self arg:event arguments arg arg If BoolOp Call Compare Return return:no If BoolOp Compare Call Assign Call 
If Return return:no If BoolOp Compare BoolOp Compare Call Assign Call Assign Return return:no Assign Call Assign Call Call Compare Call Call Assign If Compare Assign Call Compare" + }, + { + "library": "tensorflow", + "name": "sparse_dense_cwise_add", + "source_code": "def sparse_dense_cwise_add(sp_t, dense_t):\n result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values, sp_t.dense_shape, dense_t)\n return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)", + "docstring": "Adds up a SparseTensor and a dense Tensor, using these special rules: (1) Broadcasts the dense side to have the same shape as the sparse side, if eligible; (2) Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition. By the rules, the result is a logical SparseTensor with exactly the same indices and shape, but possibly with different non-zero values. The output of this Op is the resultant non-zero values. Args: sp_t: the SparseTensor operand. dense_t: the dense Tensor operand; must have the same dtype and a broadcast-compatible shape as . Returns: output: the SparseTensor output.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:sparse_dense_cwise_add arg:sp_t arg:dense_t arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_debug", + "source_code": "def is_debug(self) -> bool:\n return self.build_type_string == 'Debug'", + "docstring": "Checks Debug build.", + "type": "method", + "file_path": "pytorch\\tools\\setup_helpers\\env.py", + "ast_data": "FunctionDef name:is_debug arg:self arguments arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "_prepare_input_for_export", + "source_code": "def _prepare_input_for_export(args, kwargs):\n args, kwargs = _prepare_input_for_pytorch(args, kwargs)\n if not kwargs and len(args) > 0 and isinstance(args[-1], dict):\n onnx_inputs = args + ({},)\n elif kwargs:\n onnx_inputs = args + (kwargs,)\n else:\n onnx_inputs = args\n return onnx_inputs", + "docstring": "Prepare input for ONNX model export. Any future changes/formatting to the input before dispatching to the :func: api should be made in this function. Args: args: positional arguments for PyTorch model forward method. kwargs: keyword arguments for PyTorch model forward method. 
Returns: onnx_inputs: positional arguments for ONNX model export, as in :func:.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:_prepare_input_for_export arg:args arg:kwargs arguments arg arg Assign Call If BoolOp Compare Call Call Assign If Assign Assign Return return:yes" + }, + { + "library": "scrapy", + "name": "handshakeCompleted", + "source_code": "def handshakeCompleted(self) -> None:\n assert self.transport is not None\n if self.transport.negotiatedProtocol is not None and self.transport.negotiatedProtocol != PROTOCOL_NAME:\n self._lose_connection_with_error([InvalidNegotiatedProtocol(self.transport.negotiatedProtocol)])", + "docstring": "Close the connection if it's not made via the expected protocol", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py", + "ast_data": "FunctionDef name:handshakeCompleted arg:self arguments arg Compare If BoolOp Compare Compare Call Call" + }, + { + "library": "scikit-learn", + "name": "encode", + "source_code": "def encode(self, obj):\n data = [row for row in self.iter_encode(obj)]\n return '\\n'.join(data)", + "docstring": "Encodes a given object to an ARFF file. :param obj: the object containing the ARFF information. :return: the ARFF file as an string.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\_arff.py", + "ast_data": "FunctionDef name:encode arg:self arg:obj arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_is_full_circle_rad", + "source_code": "def _is_full_circle_rad(thetamin, thetamax):\n return abs(abs(thetamax - thetamin) - 2 * np.pi) < 1.74e-14", + "docstring": "Determine if a wedge (in radians) spans the full circle. The condition is derived from :class:.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", + "ast_data": "FunctionDef name:_is_full_circle_rad arg:thetamin arg:thetamax arguments arg arg Return return:yes Compare Call Call" + }, + { + "library": "scipy", + "name": "_log_gauss_mass", + "source_code": "def _log_gauss_mass(a, b):\n a, b = np.broadcast_arrays(a, b)\n case_left = b <= 0\n case_right = a > 0\n case_central = ~(case_left | case_right)\n\n def mass_case_left(a, b):\n return _log_diff(_norm_logcdf(b), _norm_logcdf(a))\n\n def mass_case_right(a, b):\n return mass_case_left(-b, -a)\n\n def mass_case_central(a, b):\n return sc.log1p(-_norm_cdf(a) - _norm_cdf(-b))\n out = np.full_like(a, fill_value=np.nan, dtype=np.complex128)\n if a[case_left].size:\n out[case_left] = mass_case_left(a[case_left], b[case_left])\n if a[case_right].size:\n out[case_right] = mass_case_right(a[case_right], b[case_right])\n if a[case_central].size:\n out[case_central] = mass_case_central(a[case_central], b[case_central])\n return np.real(out)", + "docstring": "Log of Gaussian probability mass within an interval", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "FunctionDef name:_log_gauss_mass arg:a arg:b arguments arg arg Assign Call Assign Compare Assign Compare Assign FunctionDef name:mass_case_left arg:a arg:b arguments arg arg Return return:yes Call Call Call FunctionDef name:mass_case_right arg:a arg:b arguments arg arg Return return:yes Call FunctionDef name:mass_case_central arg:a arg:b arguments arg arg Return return:yes Call Call Call Assign Call If Assign Call If Assign Call If Assign Call Return return:yes Call" + }, + { + "library": "cryptography", + "name": 
"rsa_crt_iqmp", + "source_code": "def rsa_crt_iqmp(p: int, q: int) -> int:\n return _modinv(q, p)", + "docstring": "Compute the CRT (q ** -1) % p value from RSA primes p and q.", + "type": "function", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py", + "ast_data": "FunctionDef name:rsa_crt_iqmp arg:p arg:q arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "num_jac", + "source_code": "def num_jac(fun, t, y, f, threshold, factor, sparsity=None):\n y = np.asarray(y)\n n = y.shape[0]\n if n == 0:\n return (np.empty((0, 0)), factor)\n if factor is None:\n factor = np.full(n, EPS ** 0.5)\n else:\n factor = factor.copy()\n f_sign = 2 * (np.real(f) >= 0).astype(float) - 1\n y_scale = f_sign * np.maximum(threshold, np.abs(y))\n h = y + factor * y_scale - y\n for i in np.nonzero(h == 0)[0]:\n while h[i] == 0:\n factor[i] *= 10\n h[i] = y[i] + factor[i] * y_scale[i] - y[i]\n if sparsity is None:\n return _dense_num_jac(fun, t, y, f, h, factor, y_scale)\n else:\n structure, groups = sparsity\n return _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups)", + "docstring": "Finite differences Jacobian approximation tailored for ODE solvers. This function computes finite difference approximation to the Jacobian matrix of with respect to using forward differences. The Jacobian matrix has shape (n, n) and its element (i, j) is equal to `ythresholdstructurefactor` for the next evaluation.", + "type": "function", + "file_path": "scipy\\scipy\\integrate\\_ivp\\common.py", + "ast_data": "FunctionDef name:num_jac arg:fun arg:t arg:y arg:f arg:threshold arg:factor arg:sparsity arguments arg arg arg arg arg arg arg Assign Call Assign If Compare Return return:yes Call If Compare Assign Call Assign Call Assign Call Compare Call Assign Call Call Assign For Call Compare While Compare Assign If Compare Return return:yes Call Assign Return return:yes Call" + }, + { + "library": "seaborn", + "name": "_dodge_needed", + "source_code": "def _dodge_needed(self):\n groupers = list({self.orient, 'col', 'row'} & set(self.variables))\n if 'hue' in self.variables:\n orient = self.plot_data[groupers].value_counts()\n paired = self.plot_data[[*groupers, 'hue']].value_counts()\n return orient.size != paired.size\n return False", + "docstring": "Return True when use of would cause overlaps.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:_dodge_needed arg:self arguments arg Assign Call Call If Compare Assign Call Assign Call Return return:yes Compare Return return:yes" + }, + { + "library": "scrapy", + "name": "arglist_to_dict", + "source_code": "def arglist_to_dict(arglist: list[str]) -> dict[str, str]:\n return dict((x.split('=', 1) for x in arglist))", + "docstring": "Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] 
to a dict", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\conf.py", + "ast_data": "FunctionDef name:arglist_to_dict arg:arglist arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_raise_if_missing", + "source_code": "def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:\n if len(key) == 0:\n return\n missing_mask = indexer < 0\n nmissing = missing_mask.sum()\n if nmissing:\n if nmissing == len(indexer):\n raise KeyError(f'None of [{key}] are in the [{axis_name}]')\n not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())\n raise KeyError(f'{not_found} not in index')", + "docstring": "Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_raise_if_missing arg:self arg:key arg:indexer arg:axis_name arguments arg arg arg arg If Compare Call Return return:no Assign Compare Assign Call If If Compare Call Raise Call Assign Call Call Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "assert_has_rank", + "source_code": "def assert_has_rank(self, rank):\n if self.rank not in (None, rank):\n raise ValueError('Shape %s must have rank %d' % (self, rank))", + "docstring": "Raises an exception if is not compatible with the given . Args: rank: An integer. Raises: ValueError: If does not represent a shape with the given .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:assert_has_rank arg:self arg:rank arguments arg arg If Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "truediv", + "source_code": "@tf_export('math.truediv', 'truediv')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef truediv(x, y, name=None):\n return _truediv_python3(x, y, name)", + "docstring": "Divides x / y elementwise (using Python 3 division operator semantics). NOTE: Prefer using the Tensor operator or tf.divide which obey Python division operator semantics. This function forces Python 3 division operator semantics where all integer arguments are cast to floating types first. If you want integer division that rounds down, use or . and must have the same numeric type. If the inputs are floating point, the output will have the same type. If the inputs are integral, the inputs are cast to for and and for and (matching the behavior of Numpy). Example: >>> # Division with integer tensors (returns float) >>> x1 = tf.constant([10, 20, 30], dtype=tf.int32) >>> y1 = tf.constant([2, 4, 5], dtype=tf.int32) >>> result1 = tf.math.truediv(x1, y1) >>> # Division with different shaped tensors (broadcasting) >>> x2 = tf.constant([[10, 20], [30, 40]], dtype=tf.float64) >>> y2 = tf.constant([2, 5], dtype=tf.float64) >>> result2 = tf.math.truediv(x2, y2) # Handling potential division by zero (returns inf) >>> x3 = tf.constant(5, dtype=tf.float32) >>> y3 = tf.constant(0, dtype=tf.float32) >>> result3 = tf.math.truediv(x3, y3) Args: x: numerator of numeric type. y: denominator of numeric type. name: A name for the operation (optional). 
Returns: evaluated in floating point. Raises: TypeError: If and have different dtypes.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:truediv arg:x arg:y arg:name arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "group", + "source_code": "def group(self, group_id):\n self._validate_group_id(group_id)\n return self._Context(self, group_id)", + "docstring": "Enter a context where the lock is with group . Args: group_id: The group for which to acquire and release the lock. Returns: A context manager which will acquire the lock for .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\lock_util.py", + "ast_data": "FunctionDef name:group arg:self arg:group_id arguments arg arg Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "broadcast_to", + "source_code": "@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')\ndef broadcast_to(array, shape, subok=False):\n return _broadcast_to(array, shape, subok=subok, readonly=True)", + "docstring": "Broadcast an array to a new shape. Parameters ---------- array : array_like The array to broadcast. shape : tuple or int The shape of the desired array. A single integer ``. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (default). Returns ------- broadcast : array A readonly view on the original array with the given shape. It is typically not contiguous. Furthermore, more than one element of a broadcasted array may refer to a single memory location. Raises ------ ValueError If the array is not compatible with the new shape according to NumPy's broadcasting rules. 
See Also -------- broadcast broadcast_arrays broadcast_shapes Examples -------- >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> np.broadcast_to(x, (3, 3)) array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_stride_tricks_impl.py", + "ast_data": "FunctionDef name:broadcast_to arg:array arg:shape arg:subok arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "new_axes", + "source_code": "def new_axes(objs: list[Series | DataFrame], bm_axis: AxisInt, intersect: bool, sort: bool, keys: Iterable[Hashable] | None, names: list[HashableT] | None, axis: AxisInt, levels, verify_integrity: bool, ignore_index: bool) -> list[Index]:\n return [_get_concat_axis_dataframe(objs, axis, ignore_index, keys, names, levels, verify_integrity) if i == bm_axis else get_objs_combined_axis(objs, axis=objs[0]._get_block_manager_axis(i), intersect=intersect, sort=sort) for i in range(2)]", + "docstring": "Return the new [index, column] result for concat.", + "type": "function", + "file_path": "pandas\\pandas\\core\\reshape\\concat.py", + "ast_data": "FunctionDef name:new_axes arg:objs arg:bm_axis arg:intersect arg:sort arg:keys arg:names arg:axis arg:levels arg:verify_integrity arg:ignore_index arguments arg arg arg arg arg arg arg arg arg arg Return return:yes Compare Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "compute_dz", + "source_code": "def compute_dz(self):\n el_geom_w = self.compute_geom_weights()\n el_geom_grad = self.compute_geom_grads()\n w_node_sum = np.bincount(np.ravel(self._triangles), weights=np.ravel(el_geom_w))\n dfx_el_w = np.empty_like(el_geom_w)\n dfy_el_w = np.empty_like(el_geom_w)\n for iapex in range(3):\n dfx_el_w[:, iapex] = el_geom_w[:, iapex] * el_geom_grad[:, 0]\n dfy_el_w[:, iapex] = el_geom_w[:, iapex] * el_geom_grad[:, 1]\n dfx_node_sum = np.bincount(np.ravel(self._triangles), weights=np.ravel(dfx_el_w))\n dfy_node_sum = np.bincount(np.ravel(self._triangles), weights=np.ravel(dfy_el_w))\n dfx_estim = dfx_node_sum / w_node_sum\n dfy_estim = dfy_node_sum / w_node_sum\n return np.vstack([dfx_estim, dfy_estim]).T", + "docstring": "self.df is computed as weighted average of _triangles sharing a common node. On each triangle itri f is first assumed linear (= ~f), which allows to compute d~f[itri] Then the following approximation of df nodal values is then proposed: f[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt) The weighted coeff. w[itri] are proportional to the angle of the triangle itri at apex ipt", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:compute_dz arg:self arguments arg Assign Call Assign Call Assign Call Call Call Assign Call Assign Call For Call Assign Assign Assign Call Call Call Assign Call Call Call Assign Assign Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "split", + "source_code": "def split(self, X, y=None, groups=None):\n if groups is not None:\n warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning)\n return super().split(X, y, groups=groups)", + "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) The target variable for supervised learning problems. 
groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", + "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_JoinConfig", + "source_code": "class _JoinConfig(NamedTuple):\n enable: bool\n throw_on_early_termination: bool\n is_first_joinable: bool\n\n @staticmethod\n def construct_disabled_join_config():\n return _JoinConfig(enable=False, throw_on_early_termination=False, is_first_joinable=False)", + "docstring": "This includes all fields needed from a :class: instance for the join context manager side.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py", + "ast_data": "ClassDef name:_JoinConfig FunctionDef name:construct_disabled_join_config arguments Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "neighbors", + "source_code": "@property\ndef neighbors(self):\n if self._neighbors is None:\n self._neighbors = self.get_cpp_triangulation().get_neighbors()\n return self._neighbors", + "docstring": "Return integer array of shape (ntri, 3) containing neighbor triangles. For each triangle, the indices of the three triangles that share the same edges, or -1 if there is no such neighboring triangle. ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py", + "ast_data": "FunctionDef name:neighbors arg:self arguments arg If Compare Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "dtypes", + "source_code": "@property\ndef dtypes(self) -> Series:\n from pandas import Index, Series\n pa_type = self._data.dtype.pyarrow_dtype\n types = [ArrowDtype(struct.type) for struct in pa_type]\n names = [struct.name for struct in pa_type]\n return Series(types, index=Index(names))", + "docstring": "Return the dtype object of each child field of the struct. Returns ------- pandas.Series The data type of each child field. See Also -------- Series.dtype: Return the dtype object of the underlying data. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... {\"version\": 1, \"project\": \"pandas\"}, ... {\"version\": 2, \"project\": \"pandas\"}, ... {\"version\": 1, \"project\": \"numpy\"}, ... ], ... dtype=pd.ArrowDtype( ... pa.struct([(\"version\", pa.int64()), (\"project\", pa.string())]) ... ), ... 
) >>> s.struct.dtypes version int64[pyarrow] project string[pyarrow] dtype: object", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\accessors.py", + "ast_data": "FunctionDef name:dtypes arg:self arguments arg Assign Assign Call Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "Container", + "source_code": "class Container(object):\n\n def __init__(self, output_names=None):\n self._output_names = output_names\n\n def build(self, y_pred):\n if self._output_names is None:\n self._output_names = create_pseudo_output_names(y_pred)\n\n def _conform_to_outputs(self, outputs, struct):\n struct = map_to_output_names(outputs, self._output_names, struct)\n struct = map_missing_dict_keys(outputs, struct)\n if not nest.is_nested(struct) and nest.is_nested(outputs):\n struct = nest.map_structure(lambda _: struct, outputs)\n return struct\n\n def _maybe_broadcast_to_outputs(self, outputs, objects):\n if not self._should_broadcast(objects):\n return objects\n should_copy_objects = len(nest.flatten(outputs)) > 1\n\n def _broadcast_fn():\n if should_copy_objects:\n return nest.map_structure(self._copy_object, objects)\n return objects\n return nest.map_structure(lambda _: _broadcast_fn(), outputs)\n\n def _should_broadcast(self, objects):\n raise NotImplementedError\n\n def _copy_object(self, obj):\n raise NotImplementedError", + "docstring": "Base Container class.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "ClassDef name:Container FunctionDef name:__init__ arg:self arg:output_names arguments arg arg Assign FunctionDef name:build arg:self arg:y_pred arguments arg arg If Compare Assign Call FunctionDef name:_conform_to_outputs arg:self arg:outputs arg:struct arguments arg arg arg Assign Call Assign Call If BoolOp Call Call Assign Call arguments arg Return return:yes FunctionDef name:_maybe_broadcast_to_outputs arg:self arg:outputs arg:objects arguments arg arg arg If Call Return return:yes Assign Compare Call Call FunctionDef name:_broadcast_fn arguments If Return return:yes Call Return return:yes Return return:yes Call arguments arg Call FunctionDef name:_should_broadcast arg:self arg:objects arguments arg arg Raise FunctionDef name:_copy_object arg:self arg:obj arguments arg arg Raise" + }, + { + "library": "django", + "name": "get_distance", + "source_code": "def get_distance(self, f, value, lookup_type):\n if not value:\n return []\n value = value[0]\n if isinstance(value, Distance):\n if f.geodetic(self.connection):\n dist_param = value.m\n else:\n dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))\n else:\n dist_param = value\n if lookup_type == 'dwithin':\n dist_param = 'distance=%s' % dist_param\n return [dist_param]", + "docstring": "Return the distance parameters given the value and the lookup type. 
On Oracle, geometry columns with a geodetic coordinate system behave implicitly like a geography column, and thus meters will be used as the distance parameter on them.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\operations.py", + "ast_data": "FunctionDef name:get_distance arg:self arg:f arg:value arg:lookup_type arguments arg arg arg arg If Return return:no Assign If Call If Call Assign Assign Call Call Call Assign If Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "register_hook", + "source_code": "@abc.abstractmethod\ndef register_hook(self, fn: Callable[..., Any]) -> RemovableHandle:\n raise NotImplementedError", + "docstring": "Register a backward hook. The hook will be called every time a gradient with respect to the Node is computed. The hook should have the following signature:: hook(grad_inputs: Tuple[Tensor], grad_outputs: Tuple[Tensor]) -> Tuple[Tensor] or None The hook should not modify its argument, but it can optionally return a new gradient which will be used in place of :attr:. This function returns a handle with a method `backward-hooks-executiongrad_outputsgrad_inputsgrad_outputs`. Example:: >>> import torch >>> a = torch.tensor([0., 0., 0.], requires_grad=True) >>> b = a.clone() >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node) >>> handle = b.grad_fn.register_hook(lambda gI, gO: (gO[0] * 2,)) >>> b.sum().backward(retain_graph=True) >>> print(a.grad) tensor([2., 2., 2.]) >>> handle.remove() # Removes the hook >>> a.grad = None >>> b.sum().backward(retain_graph=True) >>> print(a.grad) tensor([1., 1., 1.])", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\graph.py", + "ast_data": "FunctionDef name:register_hook arg:self arg:fn arguments arg arg Raise" + }, + { + "library": "scipy", + "name": "nth_moment", + "source_code": "def nth_moment(n_k, a_k, b_k):\n num = (a_k + b_k) ** (0.5 * n_k)\n denom = 2 ** n_k * sc.beta(a_k, b_k)\n indices = np.arange(n_k + 1)\n sgn = np.where(indices % 2 > 0, -1, 1)\n d = sc.beta(a_k + 0.5 * n_k - indices, b_k - 0.5 * n_k + indices)\n sum_terms = sc.comb(n_k, indices) * sgn * d\n return num / denom * sum_terms.sum()", + "docstring": "Computes E[T^(n_k)] where T is skew-t distributed with parameters a_k and b_k.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "FunctionDef name:nth_moment arg:n_k arg:a_k arg:b_k arguments arg arg arg Assign Assign Call Assign Call Assign Call Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "broadcast_dynamic_shape", + "source_code": "@tf_export('broadcast_dynamic_shape')\n@dispatch.add_dispatch_support\ndef broadcast_dynamic_shape(shape_x, shape_y):\n return gen_array_ops.broadcast_args(shape_x, shape_y)", + "docstring": "Computes the shape of a broadcast given symbolic shapes. When and are Tensors representing shapes (i.e. the result of calling tf.shape on another Tensor) this computes a Tensor which is the shape of the result of a broadcasting op applied in tensors of shapes and . This is useful when validating the result of a broadcasting operation when the tensors do not have statically known shapes. Example: >>> shape_x = (1, 2, 3) >>> shape_y = (5, 1, 3) >>> tf.broadcast_dynamic_shape(shape_x, shape_y) Args: shape_x: A rank 1 integer , representing the shape of x. shape_y: A rank 1 integer , representing the shape of y. Returns: A rank 1 integer representing the broadcasted shape. 
Raises: InvalidArgumentError: If the two shapes are incompatible for broadcasting.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:broadcast_dynamic_shape arg:shape_x arg:shape_y arguments arg arg Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "as_dict", + "source_code": "def as_dict(self, is_private=False, **params):\n return {'keys': [k.as_dict(is_private, **params) for k in self.keys]}", + "docstring": "Represent this key as a dict of the JSON Web Key Set.", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7517\\key_set.py", + "ast_data": "FunctionDef name:as_dict arg:self arg:is_private arguments arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_read_long", + "source_code": "def _read_long(f):\n return np.int32(struct.unpack('>l', f.read(4))[0])", + "docstring": "Read a signed 32-bit integer", + "type": "function", + "file_path": "scipy\\scipy\\io\\_idl.py", + "ast_data": "FunctionDef name:_read_long arg:f arguments arg Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "inaxes", + "source_code": "def inaxes(self, xy):\n axes_list = [a for a in self.figure.get_axes() if a.patch.contains_point(xy) and a.get_visible()]\n if axes_list:\n axes = cbook._topmost_artist(axes_list)\n else:\n axes = None\n return axes", + "docstring": "Return the topmost visible containing the point *xy*. Parameters ---------- xy : (float, float) (x, y) pixel positions from left/bottom of the canvas. Returns ------- or None The topmost visible Axes containing the point, or None if there is no Axes at the point.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:inaxes arg:self arg:xy arguments arg arg Assign Call BoolOp Call Call If Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_data_mask", + "source_code": "def _get_data_mask(self, t, f1, f2, where):\n if where is None:\n where = True\n else:\n where = np.asarray(where, dtype=bool)\n if where.size != t.size:\n msg = 'where size ({}) does not match {!r} size ({})'.format(where.size, self.t_direction, t.size)\n raise ValueError(msg)\n return where & ~functools.reduce(np.logical_or, map(np.ma.getmaskarray, [t, f1, f2]))", + "docstring": "Return a bool array, with True at all points that should eventually be rendered. 
The array is True at a point if none of the data inputs *t*, *f1*, *f2* is masked and if the input *where* is true at that point.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:_get_data_mask arg:self arg:t arg:f1 arg:f2 arg:where arguments arg arg arg arg arg If Compare Assign Assign Call If Compare Assign Call Raise Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "pdf", + "source_code": "def pdf(self, x, *args, **kwds):\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.promote_types(x.dtype, np.float64)\n x = np.asarray((x - loc) / scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._support_mask(x, *args) & (scale > 0)\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n putmask(output, 1 - cond0 + np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *(x,) + args + (scale,))\n scale, goodargs = (goodargs[-1], goodargs[:-1])\n place(output, cond, self._pdf(*goodargs) / scale)\n if output.ndim == 0:\n return output[()]\n return output", + "docstring": "Probability density function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- pdf : ndarray Probability density function evaluated at x", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:pdf arg:self arg:x arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Compare Assign Call Compare Assign Assign Call Call Call Call If Call Assign Call Assign Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "registry_name", + "source_code": "@staticmethod\n@abc.abstractmethod\ndef registry_name() -> str:\n pass", + "docstring": "See ExtensionRegistry.from_descriptor_list", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py", + "ast_data": "FunctionDef name:registry_name arguments" + }, + { + "library": "pytorch", + "name": "LazyInstanceNorm2d", + "source_code": "class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):\n cls_to_become = InstanceNorm2d\n\n def _get_no_batch_dim(self):\n return 3\n\n def _check_input_dim(self, input):\n if input.dim() not in (3, 4):\n raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')", + "docstring": "A :class: module with lazy initialization of the `InstanceNorm2dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinC(N, C, H, W)(C, H, W)(N, C, H, W)(C, H, W)(N, C, H, W)(C, H, W)` (same shape as input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\instancenorm.py", + "ast_data": "ClassDef name:LazyInstanceNorm2d Assign FunctionDef name:_get_no_batch_dim arg:self arguments arg Return return:yes FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "_ndims_from_shape", + "source_code": "def _ndims_from_shape(shape):\n if shape.get_shape().ndims not in (None, 1):\n raise ValueError('input is not a valid shape: not 1D')\n if not 
shape.dtype.is_integer:\n raise TypeError('input is not a valid shape: wrong dtype')\n if shape.get_shape().is_fully_defined():\n return constant_op.constant(shape.get_shape().as_list()[0])\n return array_ops.shape(shape)[0]", + "docstring": "Returns 's implied by a shape.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:_ndims_from_shape arg:shape arguments arg If Compare Call Raise Call If Raise Call If Call Call Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "set_shape", + "source_code": "def set_shape(self, shape):\n new_self = self.reshape(shape, copy=False).asformat(self.format)\n self.__dict__ = new_self.__dict__", + "docstring": "Set the shape of the matrix in-place", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_matrix.py", + "ast_data": "FunctionDef name:set_shape arg:self arg:shape arguments arg arg Assign Call Call Assign" + }, + { + "library": "tensorflow", + "name": "_save_function_alias", + "source_code": "def _save_function_alias(saved_model_dir: str, tags: Collection[str], function_aliases: Mapping[str, str]) -> None:\n loader = saved_model_loader.SavedModelLoader(saved_model_dir)\n meta_graph_def = loader.get_meta_graph_def_from_tags(tags)\n for function_name, function_alias in function_aliases.items():\n meta_graph_def.meta_info_def.function_aliases[function_name] = function_alias\n saved_model_proto_serialized = loader.saved_model.SerializeToString()\n path = file_io.join(saved_model_dir, saved_model_constants.SAVED_MODEL_FILENAME_PB)\n file_io.atomic_write_string_to_file(path, saved_model_proto_serialized)", + "docstring": "Saves the function alias to the SavedModel. SavedModelBuilder (TF1 saved model saver) does not support saving function aliases, so this function loads the SavedModel proto and adds the field. Args: saved_model_dir: Path to the saved model directory. tags: A collection of tags to specify the meta graph. function_aliases: Function name -> function alias mapping.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\save_model.py", + "ast_data": "FunctionDef name:_save_function_alias arg:saved_model_dir arg:tags arg:function_aliases arguments arg arg arg Assign Call Assign Call For Call Assign Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "regex_full_match", + "source_code": "@tf_export('strings.regex_full_match')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef regex_full_match(input, pattern, name=None):\n if isinstance(pattern, util_compat.bytes_or_text_types):\n return gen_string_ops.static_regex_full_match(input=input, pattern=pattern, name=name)\n return gen_string_ops.regex_full_match(input=input, pattern=pattern, name=name)", + "docstring": "Match elements of with regex . Args: input: string , the source strings to process. pattern: string or scalar string , regular expression to use, see more details at name: Name of the op. 
Returns: bool of the same shape as with match results.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py", + "ast_data": "FunctionDef name:regex_full_match arg:input arg:pattern arg:name arguments arg arg arg If Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "__getitem__", + "source_code": "def __getitem__(self, indx):\n m = self._mask\n if isinstance(m[indx], ndarray):\n return masked_array(data=self._data[indx], mask=m[indx], fill_value=self._fill_value[indx], hard_mask=self._hardmask)\n if m is not nomask and m[indx]:\n return masked\n return self._data[indx]", + "docstring": "Get the index.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:indx arguments arg arg Assign If Call Return return:yes Call If BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "save_npz", + "source_code": "def save_npz(file, matrix, compressed=True):\n arrays_dict = {}\n if matrix.format in ('csc', 'csr', 'bsr'):\n arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)\n elif matrix.format == 'dia':\n arrays_dict.update(offsets=matrix.offsets)\n elif matrix.format == 'coo':\n arrays_dict.update(row=matrix.row, col=matrix.col)\n else:\n msg = f'Save is not implemented for sparse matrix of format {matrix.format}.'\n raise NotImplementedError(msg)\n arrays_dict.update(format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data)\n if isinstance(matrix, sp.sparse.sparray):\n arrays_dict.update(_is_array=True)\n if compressed:\n np.savez_compressed(file, **arrays_dict)\n else:\n np.savez(file, **arrays_dict)", + "docstring": "Save a sparse matrix or array to a file using `` archive. Examples -------- Store sparse matrix to disk, and load it again: >>> import numpy as np >>> import scipy as sp >>> sparse_matrix = sp.sparse.csc_matrix([[0, 0, 3], [4, 0, 0]]) >>> sparse_matrix >>> sparse_matrix.toarray() array([[0, 0, 3], [4, 0, 0]], dtype=int64) >>> sp.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix = sp.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix >>> sparse_matrix.toarray() array([[0, 0, 3], [4, 0, 0]], dtype=int64)", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_matrix_io.py", + "ast_data": "FunctionDef name:save_npz arg:file arg:matrix arg:compressed arguments arg arg arg Assign If Compare Call If Compare Call If Compare Call Assign Raise Call Call Call If Call Call If Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, graph_def, input_tensors, output_tensors, input_arrays_with_shape=None, output_arrays=None, experimental_debug_info_func=None):\n super(TFLiteConverter, self).__init__(graph_def, input_tensors, output_tensors, input_arrays_with_shape, output_arrays, experimental_debug_info_func)", + "docstring": "Constructor for TFLiteConverter. Args: graph_def: Frozen TensorFlow GraphDef. input_tensors: List of input tensors. Type and shape are computed using and . output_tensors: List of output tensors (only .name is used from this). input_arrays_with_shape: Tuple of strings representing input tensor names and list of integers representing input shapes (e.g., [(\"foo\" : [1, 16, 16, 3])]). Use only when graph cannot be loaded into TensorFlow and when and are None. (default None) output_arrays: List of output tensors to freeze graph with. 
Use only when graph cannot be loaded into TensorFlow and when and are None. (default None) experimental_debug_info_func: An experimental function to retrieve the graph debug info for a set of nodes from the . Raises: ValueError: Invalid arguments.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:graph_def arg:input_tensors arg:output_tensors arg:input_arrays_with_shape arg:output_arrays arg:experimental_debug_info_func arguments arg arg arg arg arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "_with_precomputed_value_rowids", + "source_code": "def _with_precomputed_value_rowids(self):\n return RowPartition(row_splits=self._row_splits, row_lengths=self._row_lengths, value_rowids=self.value_rowids(), nrows=self._nrows, nvals=self._nvals, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)", + "docstring": "Returns a copy of with precomputed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:_with_precomputed_value_rowids arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "__contains__", + "source_code": "def __contains__(self, element: Any, /) -> bool:\n if has_torch_function_unary(self):\n return handle_torch_function(Tensor.__contains__, (self,), self, element)\n if isinstance(element, (torch.Tensor, Number, torch.SymInt, torch.SymFloat, torch.SymBool)):\n return bool((element == self).any().item())\n raise RuntimeError(f'Tensor.__contains__ only supports Tensor or scalar, but you passed in a {type(element)}.')", + "docstring": "Check if is present in tensor Args: element (Tensor or scalar): element to be checked for presence in current tensor\"", + "type": "method", + "file_path": "pytorch\\torch\\_tensor.py", + "ast_data": "FunctionDef name:__contains__ arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Call Call Compare Raise Call Call" + }, + { + "library": "cherrypy", + "name": "messageArg", + "source_code": "@cherrypy.expose\ndef messageArg(self):\n message = \"If you construct an HTTPError with a 'message' argument, it wil be placed on the error page (underneath the status line by default).\"\n raise cherrypy.HTTPError(500, message=message)", + "docstring": "Respond with an HTTP 500 and a custom message.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut10_http_errors.py", + "ast_data": "FunctionDef name:messageArg arg:self arguments arg Assign Raise Call" + }, + { + "library": "pandas", + "name": "min", + "source_code": "def min(self, axis: AxisInt | None=None, skipna: bool=True, *args, **kwargs):\n nv.validate_min(args, kwargs)\n nv.validate_minmax_axis(axis)\n if not len(self):\n return self._na_value\n if len(self) and self.is_monotonic_increasing:\n first = self[0]\n if not isna(first):\n return first\n if not self._is_multi and self.hasnans:\n mask = self._isnan\n if not skipna or mask.all():\n return self._na_value\n if not self._is_multi and (not isinstance(self._values, np.ndarray)):\n return self._values._reduce(name='min', skipna=skipna)\n return nanops.nanmin(self._values, skipna=skipna)", + "docstring": "Return the minimum value of the Index. Parameters ---------- axis : {None} Dummy argument for consistency with Series. skipna : bool, default True Exclude NA/null values when showing the result. 
*args, **kwargs Additional arguments and keywords for compatibility with NumPy. Returns ------- scalar Minimum value. See Also -------- Index.max : Return the maximum value of the object. Series.min : Return the minimum value in a Series. DataFrame.min : Return the minimum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.min() 1 >>> idx = pd.Index([\"c\", \"b\", \"a\"]) >>> idx.min() 'a' For a MultiIndex, the minimum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([(\"a\", \"b\"), (2, 1)]) >>> idx.min() ('a', 1)", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:min arg:self arg:axis arg:skipna arguments arg arg arg arg arg Call Call If Call Return return:yes If BoolOp Call Assign If Call Return return:yes If BoolOp Assign If BoolOp Call Return return:yes If BoolOp Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "validate_func_kwargs", + "source_code": "def validate_func_kwargs(kwargs: dict) -> tuple[list[str], list[str | Callable[..., Any]]]:\n tuple_given_message = 'func is expected but received {} in **kwargs.'\n columns = list(kwargs)\n func = []\n for col_func in kwargs.values():\n if not (isinstance(col_func, str) or callable(col_func)):\n raise TypeError(tuple_given_message.format(type(col_func).__name__))\n func.append(col_func)\n if not columns:\n no_arg_message = \"Must provide 'func' or named aggregation **kwargs.\"\n raise TypeError(no_arg_message)\n return (columns, func)", + "docstring": "Validates types of user-provided \"named aggregation\" kwargs. is raised if aggfunc is not or callable. Parameters ---------- kwargs : dict Returns ------- columns : List[str] List of user-provided keys. 
func : List[Union[str, callable[...,Any]]] List of user-provided aggfuncs Examples -------- >>> validate_func_kwargs({\"one\": \"min\", \"two\": \"max\"}) (['one', 'two'], ['min', 'max'])", + "type": "function", + "file_path": "pandas\\pandas\\core\\apply.py", + "ast_data": "FunctionDef name:validate_func_kwargs arg:kwargs arguments arg Assign Assign Call Assign For Call If BoolOp Call Call Raise Call Call Call Call If Assign Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "to_dense", + "source_code": "def to_dense(self) -> Tensor:\n partial_dense = _ordered_to_dense(self.kv_num_blocks, self.kv_indices)\n if self.full_kv_num_blocks is not None:\n assert self.full_kv_indices is not None\n return partial_dense | _ordered_to_dense(self.full_kv_num_blocks, self.full_kv_indices)\n return partial_dense", + "docstring": "Returns a dense block that is equivalent to the block mask.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py", + "ast_data": "FunctionDef name:to_dense arg:self arguments arg Assign Call If Compare Compare Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "DefaultInputIterator", + "source_code": "class DefaultInputIterator(object):\n\n def __init__(self, dataset):\n self._dataset = dataset\n if eager_context.executing_eagerly():\n self._iterator = dataset_ops.make_one_shot_iterator(dataset)\n else:\n self._iterator = dataset_ops.make_initializable_iterator(dataset)\n\n def get_next(self):\n return self._iterator.get_next()\n\n def get_next_as_optional(self):\n return self._iterator.get_next_as_optional()\n\n @deprecated(None, \"Use the iterator's `initializer` property instead.\")\n def initialize(self):\n if eager_context.executing_eagerly():\n self._iterator = self._dataset.make_one_shot_iterator()\n return []\n else:\n return [self._iterator.initializer]\n\n @property\n def initializer(self):\n return self.initialize()", + "docstring": "Default implementation of for default strategy.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "ClassDef name:DefaultInputIterator FunctionDef name:__init__ arg:self arg:dataset arguments arg arg Assign If Call Assign Call Assign Call FunctionDef name:get_next arg:self arguments arg Return return:yes Call FunctionDef name:get_next_as_optional arg:self arguments arg Return return:yes Call FunctionDef name:initialize arg:self arguments arg If Call Assign Call Return return:no Return return:yes Call FunctionDef name:initializer arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "set_up_planner", + "source_code": "@abc.abstractmethod\ndef set_up_planner(self, state_dict: STATE_DICT_TYPE, storage_meta: Optional[StorageMeta]=None, is_coordinator: bool=False) -> None:\n pass", + "docstring": "Initialize this planner to save ``. Implementations should save those values as they won't be provided lated in the save process. 
This is called on all ranks.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py", + "ast_data": "FunctionDef name:set_up_planner arg:self arg:state_dict arg:storage_meta arg:is_coordinator arguments arg arg arg arg" + }, + { + "library": "pygame", + "name": "empty", + "source_code": "def empty(self):\n for sprite in self.sprites():\n self.remove_internal(sprite)\n sprite.remove_internal(self)", + "docstring": "remove all sprites Group.empty(): return None Removes all the sprites from the group.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:empty arg:self arguments arg For Call Call Call" + }, + { + "library": "pytorch", + "name": "CUDADeviceVariable", + "source_code": "class CUDADeviceVariable(ContextWrappingVariable):\n\n @staticmethod\n def create(tx: 'InstructionTranslator', device, **kwargs):\n var = CUDADeviceVariable(target_values=[torch.cuda._get_device_index(device, optional=True)], initial_values=None, **kwargs)\n return var\n\n def __init__(self, target_values, initial_values=None, **kwargs) -> None:\n super().__init__(target_values=target_values, initial_values=initial_values, **kwargs)\n self.target_values = target_values\n\n def exit(self, tx: 'InstructionTranslator', *args):\n self.cleanup_assert()\n tx.output.create_node('call_function', torch.cuda._maybe_exchange_device, (self.proxy,), {})\n return variables.ConstantVariable.create(False)\n\n def enter(self, tx):\n prev_idx = torch.cuda._exchange_device(*self.target_values)\n self.set_cleanup_hook(tx, lambda: torch.cuda._maybe_exchange_device(prev_idx))\n self.proxy = tx.output.create_node('call_function', torch.cuda._exchange_device, (*self.target_values,), {})\n\n def module_name(self):\n return 'torch.cuda'\n\n def fn_name(self):\n return 'device'", + "docstring": "represents torch.cuda.device", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py", + "ast_data": "ClassDef name:CUDADeviceVariable FunctionDef name:create arg:tx arg:device arguments arg arg arg Assign Call Call Return return:yes FunctionDef name:__init__ arg:self arg:target_values arg:initial_values arguments arg arg arg arg Call Call Assign FunctionDef name:exit arg:self arg:tx arguments arg arg arg Call Call Return return:yes Call FunctionDef name:enter arg:self arg:tx arguments arg arg Assign Call Call arguments Call Assign Call FunctionDef name:module_name arg:self arguments arg Return return:yes FunctionDef name:fn_name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_save_options", + "source_code": "def get_save_options():\n return _save_context.options()", + "docstring": "Returns the save options if under a save context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save_context.py", + "ast_data": "FunctionDef name:get_save_options arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "standalone_compile", + "source_code": "def standalone_compile(gm: torch.fx.GraphModule, example_inputs: list[InputType], *, dynamic_shapes: Literal['from_example_inputs', 'from_tracing_context', 'from_graph']='from_graph', options: Optional[dict[str, Any]]=None) -> CompiledArtifact:\n from .standalone_compile import standalone_compile\n options = options if options else {}\n return standalone_compile(gm, example_inputs, dynamic_shapes=dynamic_shapes, options=options)", + "docstring": "Precompilation API for inductor. .. 
code-block:: python compiled_artifact = torch._inductor.standalone_compile(gm, args) compiled_artifact.save(path=path, format=\"binary\") # Later on a new process loaded = torch._inductor.CompiledArtifact.load(path=path, format=\"binary\") compiled_out = loaded(*args) Args: gm: Graph Module example_inputs: Inputs for the graph module dynamic_shapes: If \"from_graph\" (default), we will use the dynamic shapes in the passed-in graph module. If \"from_tracing_context\", we use the dynamic shape info in the ambient tracing context. If \"from_example_inputs\", we will specialize the graph on the example_inputs. options: Inductor compilation options Returns: CompiledArtifact that can be saved to disk or invoked directly.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\__init__.py", + "ast_data": "FunctionDef name:standalone_compile arg:gm arg:example_inputs arguments arg arg arg arg Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "RestoredOptimizer", + "source_code": "class RestoredOptimizer(OptimizerV2):\n\n def __init__(self):\n super(RestoredOptimizer, self).__init__('RestoredOptimizer')\n self._hypers_created = True\n\n def get_config(self):\n raise NotImplementedError('Restoring functional Optimizers from SavedModels is not currently supported. Please file a feature request if this limitation bothers you.')", + "docstring": "A non-functional Optimizer implementation for checkpoint compatibility. Holds slot variables and hyperparameters when an optimizer is restored from a SavedModel. These variables may be referenced in functions along with ops created by the original optimizer, but currently we do not support using the optimizer object itself (e.g. through ).", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "ClassDef name:RestoredOptimizer FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:get_config arg:self arguments arg Raise Call" + }, + { + "library": "pandas", + "name": "render_pep440_post", + "source_code": "def render_pep440_post(pieces):\n if pieces['closest-tag']:\n rendered = pieces['closest-tag']\n if pieces['distance'] or pieces['dirty']:\n rendered += f'.post{pieces['distance']}'\n if pieces['dirty']:\n rendered += '.dev0'\n rendered += plus_or_dot(pieces)\n rendered += f'g{pieces['short']}'\n else:\n rendered = f'0.post{pieces['distance']}'\n if pieces['dirty']:\n rendered += '.dev0'\n rendered += f'+g{pieces['short']}'\n return rendered", + "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] . The \".dev0\" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear \"older\" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 
0.postDISTANCE[.dev0]", + "type": "function", + "file_path": "pandas\\pandas\\_version.py", + "ast_data": "FunctionDef name:render_pep440_post arg:pieces arguments arg If Assign If BoolOp If Call Assign If Return return:yes" + }, + { + "library": "scipy", + "name": "_getrow", + "source_code": "def _getrow(self, i):\n M, N = self.shape\n i = int(i)\n if i < 0:\n i += M\n if i < 0 or i >= M:\n raise IndexError(f'index ({i}) out of range')\n return self._get_submatrix(minor=i).tocsr()", + "docstring": "Returns a copy of row i of the matrix, as a (1 x n) CSR matrix (row vector).", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_csc.py", + "ast_data": "FunctionDef name:_getrow arg:self arg:i arguments arg arg Assign Assign Call If Compare If BoolOp Compare Compare Raise Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "is_local_src_dir", + "source_code": "def is_local_src_dir(directory):\n if not is_string(directory):\n return False\n abs_dir = os.path.abspath(directory)\n c = os.path.commonprefix([os.getcwd(), abs_dir])\n new_dir = abs_dir[len(c):].split(os.sep)\n if new_dir and (not new_dir[0]):\n new_dir = new_dir[1:]\n if new_dir and new_dir[0] == 'build':\n return False\n new_dir = os.sep.join(new_dir)\n return os.path.isdir(new_dir)", + "docstring": "Return true if directory is local directory.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:is_local_src_dir arg:directory arguments arg If Call Return return:yes Assign Call Assign Call Call Assign Call Call If BoolOp Assign If BoolOp Compare Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "count", + "source_code": "@forbid_nonstring_types(['bytes'])\ndef count(self, pat, flags: int=0):\n result = self._data.array._str_count(pat, flags)\n return self._wrap_result(result, returns_string=False)", + "docstring": "Count occurrences of pattern in each string of the Series/Index. This function is used to count the number of times a particular regex pattern is repeated in each of the string elements of the :class:. Parameters ---------- pat : str Valid regular expression. flags : int, default 0, meaning no flags Flags for the module. For a complete list, _. Returns ------- Series or Index Same type as the calling object containing the integer counts. See Also -------- re : Standard library module for regular expressions. str.count : Standard library version, without regular expression support. Notes ----- Some characters need to be escaped when passing in . eg. `` to find the literal dollar sign. 
>>> s = pd.Series([\"$\", \"B\", \"Aab$\", \"$$ca\", \"C$B$\", \"cat\"]) >>> s.str.count(\"\\\\$\") 0 1 1 0 2 1 3 2 4 2 5 0 dtype: int64 This is also available on Index >>> pd.Index([\"A\", \"A\", \"Aaba\", \"cat\"]).str.count(\"a\") Index([0, 0, 2, 1], dtype='int64')", + "type": "method", + "file_path": "pandas\\pandas\\core\\strings\\accessor.py", + "ast_data": "FunctionDef name:count arg:self arg:pat arg:flags arguments arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "conv3d", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)):\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n padding = _preprocess_padding(padding)\n x = nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format)\n if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n return x", + "docstring": "3D convolution. Args: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. padding: string, or . data_format: string, or . dilation_rate: tuple of 3 integers. Returns: A tensor, result of 3D convolution. Raises: ValueError: if is neither or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:conv3d arg:x arg:kernel arg:strides arg:padding arg:data_format arg:dilation_rate arguments arg arg arg arg arg arg If Compare Assign Call If Compare Raise Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "benchmark_gpu", + "source_code": "@time_and_count\ndef benchmark_gpu(self: Self, _callable: Callable[[], Any], estimation_iters: int=5, memory_warmup_iters: int=100, benchmark_iters: int=100, max_benchmark_duration: int=25, **kwargs: Any) -> float:\n torch.cuda.synchronize()\n _callable()\n torch.cuda.synchronize()\n buffer = torch.empty(self.L2_cache_size // 4, dtype=torch.int, device='cuda')\n buffer.zero_()\n event_pairs = self.get_event_pairs(estimation_iters)\n for start_event, end_event in event_pairs:\n buffer.zero_()\n start_event.record()\n _callable()\n end_event.record()\n torch.cuda.synchronize()\n estimated_timing = self.get_event_pairs_min_timing(event_pairs)\n benchmark_iters = max(min(benchmark_iters, int(max_benchmark_duration // estimated_timing)), 1)\n for _ in range(memory_warmup_iters):\n buffer.zero_()\n event_pairs = self.get_event_pairs(benchmark_iters)\n for start_event, end_event in event_pairs:\n buffer.zero_()\n start_event.record()\n _callable()\n end_event.record()\n torch.cuda.synchronize()\n benchmarked_timing = self.get_event_pairs_min_timing(event_pairs)\n del buffer\n return min(estimated_timing, benchmarked_timing)", + "docstring": "Benchmark a GPU callable using a custom benchmarking implementation. Arguments: - _callable: The callable to benchmark. Keyword Arguments: - estimation_iters: Optionally, the number of iterations to run during runtime estimation. - memory_warmup_iters: Optionally, the number of iterations to flush the L2 cache before starting benchmarking. 
- benchmark_iters: Optionally, the number of iterations to run during the benchmarking. - max_benchmark_duration: Optionally, the maximum duration of the benchmarking, in milliseconds. An estimated duration is calculated based on the values of and , along with the estimated runtime of and various other factors, and we then shrink to fit in the alloted maximum duration. - **kwargs: Additional kwargs that may be passed to the fallback. Returns: - The minimum runtime of , in milliseconds.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py", + "ast_data": "FunctionDef name:benchmark_gpu arg:self arg:_callable arg:estimation_iters arg:memory_warmup_iters arg:benchmark_iters arg:max_benchmark_duration arguments arg arg arg arg arg arg arg Call Call Call Assign Call Call Assign Call For Call Call Call Call Call Assign Call Assign Call Call Call For Call Call Assign Call For Call Call Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_dump_file_name_to_datum", + "source_code": "def _dump_file_name_to_datum(self, dir_name, file_name):\n debug_dump_rel_path = os.path.join(os.path.relpath(dir_name, self._dump_root), file_name)\n return DebugTensorDatum(self._dump_root, debug_dump_rel_path)", + "docstring": "Obtain a DebugTensorDatum from the directory and file name. Args: dir_name: () Name of the directory in which the dump file resides. file_name: () Base name of the dump file. Returns: () The loaded from the dump file.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:_dump_file_name_to_datum arg:self arg:dir_name arg:file_name arguments arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "ask_not_null_addition", + "source_code": "def ask_not_null_addition(self, field_name, model_name):\n return None", + "docstring": "Adding a NOT NULL field to a model.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\questioner.py", + "ast_data": "FunctionDef name:ask_not_null_addition arg:self arg:field_name arg:model_name arguments arg arg arg Return return:no" + }, + { + "library": "tensorflow", + "name": "emit_obj_create", + "source_code": "def emit_obj_create(self, category: str, name: str, timestamp: int, pid: int, tid: int, object_id: int) -> None:\n event = self._create_event('N', category, name, pid, tid, timestamp)\n event['id'] = object_id\n self._events.append(event)", + "docstring": "Adds an object creation event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", + "ast_data": "FunctionDef name:emit_obj_create arg:self arg:category arg:name arg:timestamp arg:pid arg:tid arg:object_id arguments arg arg arg arg arg arg arg Assign Call Assign Call" + }, + { + "library": "numpy", + "name": "astype", + "source_code": "@array_function_dispatch(_astype_dispatcher)\ndef astype(x, dtype, /, *, copy=True, device=None):\n if not (isinstance(x, np.ndarray) or isscalar(x)):\n raise TypeError(f'Input should be a NumPy array or scalar. 
It is a {type(x)} instead.')\n if device is not None and device != 'cpu':\n raise ValueError(f'Device not understood. Only \"cpu\" is allowed, but received: {device}')\n return x.astype(dtype, copy=copy)", + "docstring": "Copies an array to a specified data type. This function is an Array API compatible alternative to . Parameters ---------- x : ndarray Input NumPy array to cast. `` if passed. .. versionadded:: 2.1.0 Returns ------- out : ndarray An array having the specified data type. See Also -------- ndarray.astype Examples -------- >>> import numpy as np >>> arr = np.array([1, 2, 3]); arr array([1, 2, 3]) >>> np.astype(arr, np.float64) array([1., 2., 3.]) Non-copy case: >>> arr = np.array([1, 2, 3]) >>> arr_noncpy = np.astype(arr, arr.dtype, copy=False) >>> np.shares_memory(arr, arr_noncpy) True", + "type": "function", + "file_path": "numpy\\numpy\\_core\\numeric.py", + "ast_data": "FunctionDef name:astype arguments arg arg arg arg If BoolOp Call Call Raise Call Call If BoolOp Compare Compare Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_IteratorSaveable", + "source_code": "class _IteratorSaveable(BaseSaverBuilder.SaveableObject):\n\n def __init__(self, iterator_resource, name, external_state_policy=options_lib.ExternalStatePolicy.FAIL):\n serialized_iterator = gen_dataset_ops.serialize_iterator(iterator_resource, external_state_policy=external_state_policy.value)\n specs = [BaseSaverBuilder.SaveSpec(serialized_iterator, '', name + '_STATE', device=iterator_resource.device)]\n super(_IteratorSaveable, self).__init__(iterator_resource, specs, name)\n\n def restore(self, restored_tensors, restored_shapes):\n with ops.colocate_with(self.op):\n return gen_dataset_ops.deserialize_iterator(self.op, restored_tensors[0])", + "docstring": "SaveableObject for saving/restoring iterator state.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", + "ast_data": "ClassDef name:_IteratorSaveable FunctionDef name:__init__ arg:self arg:iterator_resource arg:name arg:external_state_policy arguments arg arg arg arg Assign Call Assign Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg With Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "init", + "source_code": "def init():\n if not _module_init():\n _pypm.Initialize()\n _module_init(True)\n atexit.register(quit)", + "docstring": "initialize the midi module pygame.midi.init(): return None Call the initialisation function before using the midi module. It is safe to call this more than once.", + "type": "function", + "file_path": "pygame\\src_py\\midi.py", + "ast_data": "FunctionDef name:init arguments If Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "clear_data_home", + "source_code": "@validate_params({'data_home': [str, os.PathLike, None]}, prefer_skip_nested_validation=True)\ndef clear_data_home(data_home=None):\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)", + "docstring": "Delete all the content of the data home cache. Parameters ---------- data_home : str or path-like, default=None The path to scikit-learn data directory. If , the default path is . 
Examples -------- >>> from sklearn.datasets import clear_data_home >>> clear_data_home() # doctest: +SKIP", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_base.py", + "ast_data": "FunctionDef name:clear_data_home arg:data_home arguments arg Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "train_on_batch", + "source_code": "def train_on_batch(model, inputs, targets, sample_weights=None, output_loss_metrics=None):\n inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)\n outs, total_loss, output_losses, masks = _process_single_batch(model, inputs, targets, sample_weights=sample_weights, training=True, output_loss_metrics=output_loss_metrics)\n if not isinstance(outs, list):\n outs = [outs]\n metrics_results = _eager_metrics_fn(model, outs, targets, sample_weights=sample_weights, masks=masks)\n total_loss = nest.flatten(total_loss)\n return {'total_loss': total_loss, 'output_losses': output_losses, 'metrics': metrics_results}", + "docstring": "Calculates the loss and gradient updates for one input batch. Args: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. sample_weights: Sample weight batch data. output_loss_metrics: List of metrics that are used to aggregated output loss values. Returns: Dict with three items: 'total_loss': list with a single tensor for overall loss, 'output_losses': list of tensors for loss corresponding to each of the model output. Could be a empty list when model has only one output. 'metrics': list of tensors for metric specified.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_eager_v1.py", + "ast_data": "FunctionDef name:train_on_batch arg:model arg:inputs arg:targets arg:sample_weights arg:output_loss_metrics arguments arg arg arg arg arg Assign Call Assign Call If Call Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__make_cmp_key", + "source_code": "def __make_cmp_key(self, value):\n if isinstance(value, (int, float, bool, np.generic, dtypes.DType, TypeSpec, tensor_shape.TensorShape)):\n return value\n if isinstance(value, compat.bytes_or_text_types):\n return value\n if value is None:\n return value\n if isinstance(value, dict):\n return tuple([tuple([self.__make_cmp_key(key), self.__make_cmp_key(value[key])]) for key in sorted(value.keys())])\n if isinstance(value, tuple):\n return tuple([self.__make_cmp_key(v) for v in value])\n if isinstance(value, list):\n return (list, tuple([self.__make_cmp_key(v) for v in value]))\n if isinstance(value, np.ndarray):\n return (np.ndarray, value.shape, TypeSpec.__nested_list_to_tuple(value.tolist()))\n raise ValueError(f'Cannot generate a hashable key for {self} because the _serialize() method returned an unsupported value of type {type(value)}')", + "docstring": "Converts to a hashable key.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:__make_cmp_key arg:self arg:value arguments arg arg If Call Return return:yes If Call Return return:yes If Compare Return return:yes If Call Return return:yes Call Call Call Call Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Raise Call Call" + }, + { + "library": "kornia", + "name": "_normalize_input", + "source_code": "@staticmethod\ndef _normalize_input(x: torch.Tensor, eps: float=1e-06) -> torch.Tensor:\n if not 
is_mps_tensor_safe(x):\n sp, mp = torch.std_mean(x, dim=(-3, -2, -1), keepdim=True)\n else:\n mp = torch.mean(x, dim=(-3, -2, -1), keepdim=True)\n sp = torch.std(x, dim=(-3, -2, -1), keepdim=True)\n return (x - mp.detach()) / (sp.detach() + eps)", + "docstring": "Normalize the input by batch.", + "type": "method", + "file_path": "kornia\\kornia\\feature\\hardnet.py", + "ast_data": "FunctionDef name:_normalize_input arg:x arg:eps arguments arg arg If Call Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "VariableWindowIndexer", + "source_code": "class VariableWindowIndexer(BaseIndexer):\n\n @Appender(get_window_bounds_doc)\n def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]:\n return calculate_variable_window_bounds(num_values, self.window_size, min_periods, center, closed, self.index_array)", + "docstring": "Creates window boundaries that are of variable length, namely for time series.", + "type": "class", + "file_path": "pandas\\pandas\\core\\indexers\\objects.py", + "ast_data": "ClassDef name:VariableWindowIndexer FunctionDef name:get_window_bounds arg:self arg:num_values arg:min_periods arg:center arg:closed arg:step arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "GradIncrementNestingCtxManagerVariable", + "source_code": "class GradIncrementNestingCtxManagerVariable(ContextWrappingVariable):\n _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH)\n\n @staticmethod\n def create(tx: 'InstructionTranslator', **kwargs):\n var = GradIncrementNestingCtxManagerVariable(target_values=None, initial_values=None, **kwargs)\n return var\n\n def enter(self, tx):\n install_guard(self._guards_singleton)\n grad_level = torch._C._functorch._grad_increment_nesting()\n self.set_cleanup_hook(tx, lambda: torch._C._functorch._grad_decrement_nesting())\n self.proxy = tx.output.create_node('call_function', torch._C._functorch._grad_increment_nesting, (), {})\n return variables.ConstantVariable.create(grad_level)\n\n def exit(self, tx: 'InstructionTranslator', *args):\n self.cleanup()\n tx.output.create_node('call_function', torch._C._functorch._grad_decrement_nesting, (), {})\n return variables.ConstantVariable.create(None)", + "docstring": "represents torch.func.grad increment/decrement nesting", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py", + "ast_data": "ClassDef name:GradIncrementNestingCtxManagerVariable Assign Call Call FunctionDef name:create arg:tx arguments arg arg Assign Call Return return:yes FunctionDef name:enter arg:self arg:tx arguments arg arg Call Assign Call Call arguments Call Assign Call Return return:yes Call FunctionDef name:exit arg:self arg:tx arguments arg arg arg Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "parse_et_yaml_struct", + "source_code": "def parse_et_yaml_struct(es: object) -> ETKernelIndex:\n indices: dict[OperatorName, dict[ETKernelKey, BackendMetadata]] = {}\n for ei in es:\n e = ei.copy()\n funcs = e.pop('func')\n assert isinstance(funcs, str), f'not a str: {funcs}'\n namespace_helper = NamespaceHelper.from_namespaced_entity(namespaced_entity=funcs, max_level=1)\n opname = FunctionSchema.parse(namespace_helper.entity_name).name\n assert opname not in indices, f'Duplicate func found in yaml: {opname} already'\n if 
len((index := parse_from_yaml(e))) != 0:\n indices[opname] = index\n return ETKernelIndex(indices)", + "docstring": "Given a loaded yaml representing a list of operators, for each op extract the mapping of to (the latter representing the kernel instance that should be used by the kernel key).", + "type": "function", + "file_path": "pytorch\\torchgen\\executorch\\parse.py", + "ast_data": "FunctionDef name:parse_et_yaml_struct arg:es arguments arg For Assign Call Assign Call Call Assign Call Assign Call Compare If Compare Call Call Assign Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_transform", + "source_code": "def _transform(self, X):\n mask = self.get_support()\n if not mask.any():\n warnings.warn('No features were selected: either the data is too noisy or the selection test too strict.', UserWarning)\n if hasattr(X, 'iloc'):\n return X.iloc[:, :0]\n return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0))\n return _safe_indexing(X, mask, axis=1)", + "docstring": "Reduce X to the selected features.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py", + "ast_data": "FunctionDef name:_transform arg:self arg:X arguments arg arg Assign Call If Call Call If Call Return return:yes Return return:yes Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, mean=None, cov=1, allow_singular=False, seed=None, maxpts=None, abseps=1e-05, releps=1e-05):\n self._dist = multivariate_normal_gen(seed)\n self.dim, self.mean, self.cov_object = self._dist._process_parameters(mean, cov, allow_singular)\n self.allow_singular = allow_singular or self.cov_object._allow_singular\n if not maxpts:\n maxpts = 1000000 * self.dim\n self.maxpts = maxpts\n self.abseps = abseps\n self.releps = releps", + "docstring": "Create a frozen multivariate normal distribution. Parameters ---------- mean : array_like, default: `numpy.random.Generatornumpy.random.RandomStateseednp.randomnumpy.random.RandomStateseedseedseed`) abseps : float, optional Absolute error tolerance for the cumulative distribution function (default 1e-5) releps : float, optional Relative error tolerance for the cumulative distribution function (default 1e-5) Examples -------- When called with the default parameters, this will create a 1D random variable with mean 0 and covariance 1: >>> from scipy.stats import multivariate_normal >>> r = multivariate_normal() >>> r.mean array([ 0.]) >>> r.cov array([[1.]])", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:mean arg:cov arg:allow_singular arg:seed arg:maxpts arg:abseps arg:releps arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign BoolOp If Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "caching_allocator_delete", + "source_code": "def caching_allocator_delete(mem_ptr):\n torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr)", + "docstring": "Delete memory allocated using the CUDA memory allocator. Memory allocated with :func:. is freed here. The associated device and stream are tracked inside the allocator. Args: mem_ptr (int): memory address to be freed by the allocator. .. 
note:: See :ref: for more details about GPU memory management.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\memory.py", + "ast_data": "FunctionDef name:caching_allocator_delete arg:mem_ptr arguments arg Call" + }, + { + "library": "scipy", + "name": "identity", + "source_code": "def identity(n, dtype='d', format=None):\n return eye(n, n, dtype=dtype, format=format)", + "docstring": "Identity matrix in sparse format Returns an identity matrix with shape `eye_arrayeye_arrayeye_array` to take advantage of the sparse array functionality. Parameters ---------- n : int Shape of the identity matrix. dtype : dtype, optional Data type of the matrix format : str, optional Sparse format of the result, e.g., format=\"csr\", etc. Returns ------- new_matrix : sparse matrix A square sparse matrix with ones on the main diagonal and zeros elsewhere. See Also -------- eye_array : Sparse array of chosen shape with ones on a specified diagonal. eye : Sparse matrix of chosen shape with ones on a specified diagonal. Examples -------- >>> import scipy as sp >>> sp.sparse.identity(3).toarray() array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> sp.sparse.identity(3, dtype='int8', format='dia') >>> sp.sparse.eye_array(3, dtype='int8', format='dia')", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_construct.py", + "ast_data": "FunctionDef name:identity arg:n arg:dtype arg:format arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "BaseLogger", + "source_code": "class BaseLogger(Callback):\n\n def __init__(self, stateful_metrics=None):\n super(BaseLogger, self).__init__()\n self.stateful_metrics = set(stateful_metrics or [])\n\n def on_epoch_begin(self, epoch, logs=None):\n self.seen = 0\n self.totals = {}\n\n def on_batch_end(self, batch, logs=None):\n logs = logs or {}\n batch_size = logs.get('size', 0)\n num_steps = logs.get('num_steps', 1)\n self.seen += batch_size * num_steps\n for k, v in logs.items():\n if k in self.stateful_metrics:\n self.totals[k] = v\n elif k in self.totals:\n self.totals[k] += v * batch_size\n else:\n self.totals[k] = v * batch_size\n\n def on_epoch_end(self, epoch, logs=None):\n if logs is not None:\n for k in self.params['metrics']:\n if k in self.totals:\n if k in self.stateful_metrics:\n logs[k] = self.totals[k]\n else:\n logs[k] = self.totals[k] / self.seen", + "docstring": "Callback that accumulates epoch averages of metrics. This callback is automatically applied to every Keras model. Args: stateful_metrics: Iterable of string names of metrics that should *not* be averaged over an epoch. Metrics in this list will be logged as-is in . 
All others will be averaged in .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "ClassDef name:BaseLogger FunctionDef name:__init__ arg:self arg:stateful_metrics arguments arg arg Call Call Assign Call BoolOp FunctionDef name:on_epoch_begin arg:self arg:epoch arg:logs arguments arg arg arg Assign Assign FunctionDef name:on_batch_end arg:self arg:batch arg:logs arguments arg arg arg Assign BoolOp Assign Call Assign Call For Call If Compare Assign If Compare Assign FunctionDef name:on_epoch_end arg:self arg:epoch arg:logs arguments arg arg arg If Compare For If Compare If Compare Assign Assign" + }, + { + "library": "pytorch", + "name": "apply_shuffle_settings", + "source_code": "def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool]=None) -> DataPipe:\n if shuffle is None:\n return datapipe\n graph = traverse_dps(datapipe)\n all_pipes = get_all_graph_pipes(graph)\n shufflers = [pipe for pipe in all_pipes if _is_shuffle_datapipe(pipe)]\n if not shufflers and shuffle:\n warnings.warn('`shuffle=True` was set, but the datapipe does not contain a `Shuffler`. Adding one at the end. Be aware that the default buffer size might not be sufficient for your task.')\n datapipe = datapipe.shuffle()\n shufflers = [datapipe]\n for shuffler in shufflers:\n shuffler.set_shuffle(shuffle)\n return datapipe", + "docstring": "Traverse the graph of `DataPipe` and no-op to the graph)", + "type": "function", + "file_path": "pytorch\\torch\\utils\\data\\graph_settings.py", + "ast_data": "FunctionDef name:apply_shuffle_settings arg:datapipe arg:shuffle arguments arg arg If Compare Return return:yes Assign Call Assign Call Assign Call If BoolOp Call Assign Call Assign For Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_print_row", + "source_code": "def _print_row(fields, positions, print_fn):\n line = ''\n for i, field in enumerate(fields):\n field = str(field)\n end_line_pos = positions[i]\n if i > 0:\n line = line + ' '\n line = '{0:{min_length}}'.format(line + field, min_length=end_line_pos)\n if len(line) > end_line_pos:\n line = line[:end_line_pos - 4] + ' ...'\n print_fn(line)", + "docstring": "Prints a row.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py", + "ast_data": "FunctionDef name:_print_row arg:fields arg:positions arg:print_fn arguments arg arg arg Assign For Call Assign Call Assign If Compare Assign Assign Call If Compare Call Assign Call" + }, + { + "library": "cherrypy", + "name": "_is_daemonized", + "source_code": "def _is_daemonized(self):\n return self._original_pid != os.getpid() and (not os.isatty(sys.stdin.fileno()))", + "docstring": "Check if current process is running as a daemon. The criteria to determine the condition is to verify if the current pid is not the same as the one that got used on the initial construction of the plugin *and* the stdin is not connected to a terminal. 
The sole validation of the tty is not enough when the plugin is executing inside other process like in a CI tool (Buildbot, Jenkins).", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\plugins.py", + "ast_data": "FunctionDef name:_is_daemonized arg:self arguments arg Return return:yes BoolOp Compare Call Call Call" + }, + { + "library": "scipy", + "name": "cpenmsg", + "source_code": "def cpenmsg(solver, iprint, cpen):\n if abs(iprint) < 2:\n return\n elif iprint > 0:\n fname = ''\n else:\n fname = f'{solver.strip()}_output.txt'\n if abs(iprint) >= 3:\n message = f'\\nSet CPEN to {cpen}'\n else:\n message = f'\\n\\nSet CPEN to {cpen}'\n if len(fname) > 0:\n with open(fname, 'a') as f:\n f.write(message)\n else:\n print(message)", + "docstring": "This function prints a message when CPEN is updated.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\message.py", + "ast_data": "FunctionDef name:cpenmsg arg:solver arg:iprint arg:cpen arguments arg arg arg If Compare Call Return return:no If Compare Assign Assign Call If Compare Call Assign Assign If Compare Call With Call Call Call" + }, + { + "library": "matplotlib", + "name": "draw_path", + "source_code": "def draw_path(self, gc, path, transform, rgbFace=None):\n raise NotImplementedError", + "docstring": "Draw a instance using the given affine transform.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:draw_path arg:self arg:gc arg:path arg:transform arg:rgbFace arguments arg arg arg arg arg Raise" + }, + { + "library": "scikit-learn", + "name": "_check_parameters", + "source_code": "@abstractmethod\ndef _check_parameters(self, X):\n pass", + "docstring": "Check initial parameters of the derived class. Parameters ---------- X : array-like of shape (n_samples, n_features)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", + "ast_data": "FunctionDef name:_check_parameters arg:self arg:X arguments arg arg" + }, + { + "library": "tensorflow", + "name": "_tf_extension_type_with_packed", + "source_code": "def _tf_extension_type_with_packed(self, value):\n copy = _create_object_from_type_and_dict(type(self), self.__dict__)\n copy.__dict__['_tf_extension_type_is_packed'] = value\n return copy", + "docstring": "Returns a copy of this with . Args: value: A boolean value. 
Returns: A copy of with .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", + "ast_data": "FunctionDef name:_tf_extension_type_with_packed arg:self arg:value arguments arg arg Assign Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "add", + "source_code": "def add(self, expr: SympyBoolean) -> bool:\n if expr == sympy.true:\n return True\n orig_expr = expr\n orig_reduced = orig_expr.xreplace(self._var_to_val)\n if orig_reduced == sympy.false:\n self._inconsistencies.append(f'{orig_expr} is inconsistent!')\n if isinstance(expr, (sympy.Ne, sympy.Or, sympy.And)) or self._has_unsupported_sympy_function(expr):\n return False\n free_symbols = expr.free_symbols\n assert free_symbols, f'Did not expect constraint with no free variables: {expr}'\n if len(free_symbols) > 1:\n self._multivariate_inequalities.add(expr)\n else:\n s = next(iter(free_symbols))\n old_n_congruences = len(self._congruences[s])\n expr = self.rewrite_with_congruences(s, expr)\n new_n_congruences = len(self._congruences[s])\n if expr == sympy.true:\n return old_n_congruences == new_n_congruences\n reduced = expr.xreplace(self._var_to_val)\n if reduced == sympy.false:\n self._inconsistencies.append(f'{expr}, obtained by rewriting {orig_expr} with congruences, is inconsistent!')\n if isinstance(expr, sympy.Eq):\n self._symbols_with_equalities.add(s)\n self._univariate_inequalities[s].add(expr)\n return False", + "docstring": "Add an expression to the set of constraints. Return whether the expression is a trivial constraint (i.e., an obvious tautology).", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:add arg:self arg:expr arguments arg arg If Compare Return return:yes Assign Assign Call If Compare Call If BoolOp Call Call Return return:yes Assign If Compare Call Call Assign Call Call Assign Call Assign Call Assign Call If Compare Return return:yes Compare Assign Call If Compare Call If Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_function", + "source_code": "def get_function(name, entries):\n contents = '\\nabsl::optional> {name}(\\n const tensorflow::string &op_name) {{\\n static std::array a = {{{{\\n'.format(name=name, count=len(entries) + 1)\n contents += ' '\n contents += '\\n '.join((entries[op_type] for op_type in sorted(entries)))\n contents += '\\n {\"VarHandleOp\"},'\n contents += '\\n }};\\n static const auto &m = *OpGradientInfoInit(a);\\n\\n auto it = m.find(op_name);\\n if (it != m.end()) {\\n return it->second;\\n }\\n return absl::nullopt;\\n}\\n'\n return contents", + "docstring": "Generates lookup function with given name and lookup table entries.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\gradient_input_output_exclusions.py", + "ast_data": "FunctionDef name:get_function arg:name arg:entries arguments arg arg Assign Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_set_thread_name", + "source_code": "def _set_thread_name(name: str) -> None:\n torch._C._set_thread_name(name)", + "docstring": "Set the name of the current thread. 
Args: name (str): Name of the current thread.", + "type": "function", + "file_path": "pytorch\\torch\\multiprocessing\\__init__.py", + "ast_data": "FunctionDef name:_set_thread_name arg:name arguments arg Call" + }, + { + "library": "numpy", + "name": "mean", + "source_code": "def mean(self, axis=None, dtype=None, out=None):\n return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)", + "docstring": "Returns the average of the matrix elements along the given axis. Refer to for full documentation. See Also -------- numpy.mean Notes ----- Same as except that, where that returns an , this returns a object. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3, 4))) >>> x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.mean() 5.5 >>> x.mean(0) matrix([[4., 5., 6., 7.]]) >>> x.mean(1) matrix([[ 1.5], [ 5.5], [ 9.5]])", + "type": "method", + "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py", + "ast_data": "FunctionDef name:mean arg:self arg:axis arg:dtype arg:out arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_op_to_colocate_with", + "source_code": "def _op_to_colocate_with(v, graph) -> tuple[Optional[Operation], Optional[Callable[[], None]]]:\n if v is None:\n return (None, None)\n if isinstance(v, Operation):\n return (v, None)\n if hasattr(v, 'handle') and isinstance(v.handle, tensor_lib.Tensor):\n device_only_candidate = lambda: None\n device_only_candidate.device = v.device\n device_only_candidate.name = v.name\n if graph.building_function:\n return (graph.capture(v.handle).op, device_only_candidate)\n else:\n return (v.handle.op, device_only_candidate)\n if isinstance(v, EagerTensor) and (not context.executing_eagerly()):\n return (convert_to_tensor(v, as_ref=True).op, None)\n elif isinstance(v, internal.NativeObject):\n return (v.op, None)\n else:\n return (convert_to_tensor(v, as_ref=True).op, None)", + "docstring": "Operation object corresponding to v to use for colocation constraints.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_op_to_colocate_with arg:v arg:graph arguments arg arg If Compare Return return:no If Call Return return:yes If BoolOp Call Call Assign arguments Assign Assign If Return return:yes Call Return return:yes If BoolOp Call Call Return return:yes Call If Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "prompt_loop_or_load_from_env", + "source_code": "def prompt_loop_or_load_from_env(environ_cp, var_name, var_default, ask_for_var, check_success, error_msg, suppress_default_error=False, resolve_symlinks=False, n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):\n default = environ_cp.get(var_name) or var_default\n full_query = '%s [Default is %s]: ' % (ask_for_var, default)\n for _ in range(n_ask_attempts):\n val = get_from_env_or_user_or_default(environ_cp, var_name, full_query, default)\n if check_success(val):\n break\n if not suppress_default_error:\n print(error_msg % val)\n environ_cp[var_name] = ''\n else:\n raise UserInputError('Invalid %s setting was provided %d times in a row. Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))\n if resolve_symlinks:\n val = os.path.realpath(val)\n environ_cp[var_name] = val\n return val", + "docstring": "Loop over user prompts for an ENV param until receiving a valid response. For the env param var_name, read from the environment or verify user input until receiving valid input. 
When done, set var_name in the environ_cp to its new value. Args: environ_cp: (Dict) copy of the os.environ. var_name: (String) string for name of environment variable, e.g. \"TF_MYVAR\". var_default: (String) default value string. ask_for_var: (String) string for how to ask for user input. check_success: (Function) function that takes one argument and returns a boolean. Should return True if the value provided is considered valid. May contain a complex error message if error_msg does not provide enough information. In that case, set suppress_default_error to True. error_msg: (String) String with one and only one '%s'. Formatted with each invalid response upon check_success(input) failure. suppress_default_error: (Bool) Suppress the above error message in favor of one from the check_success function. resolve_symlinks: (Bool) Translate symbolic links into the real filepath. n_ask_attempts: (Integer) Number of times to query for valid input before raising an error and quitting. Returns: [String] The value of var_name after querying for input. Raises: UserInputError: if a query has been attempted n_ask_attempts times without success, assume that the user has made a scripting error, and will continue to provide invalid input. Raise the error to avoid infinitely looping.", + "type": "function", + "file_path": "tensorflow\\configure.py", + "ast_data": "FunctionDef name:prompt_loop_or_load_from_env arg:environ_cp arg:var_name arg:var_default arg:ask_for_var arg:check_success arg:error_msg arg:suppress_default_error arg:resolve_symlinks arg:n_ask_attempts arguments arg arg arg arg arg arg arg arg arg Assign BoolOp Call Assign For Call Assign Call If Call If Call Assign Raise Call If Assign Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_dtype_can_hold_range", + "source_code": "def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool:\n if not len(rng):\n return True\n return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype)", + "docstring": "_maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints), but in many cases a range can be held by a smaller integer dtype. 
Check if this is one of those cases.", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\cast.py", + "ast_data": "FunctionDef name:_dtype_can_hold_range arg:rng arg:dtype arguments arg arg If Call Return return:yes Return return:yes BoolOp Call Call" + }, + { + "library": "matplotlib", + "name": "_get_ticker_locator_formatter", + "source_code": "def _get_ticker_locator_formatter(self):\n locator = self._locator\n formatter = self._formatter\n minorlocator = self._minorlocator\n if isinstance(self.norm, colors.BoundaryNorm):\n b = self.norm.boundaries\n if locator is None:\n locator = ticker.FixedLocator(b, nbins=10)\n if minorlocator is None:\n minorlocator = ticker.FixedLocator(b)\n elif isinstance(self.norm, colors.NoNorm):\n if locator is None:\n nv = len(self._values)\n base = 1 + int(nv / 10)\n locator = ticker.IndexLocator(base=base, offset=0.5)\n elif self.boundaries is not None:\n b = self._boundaries[self._inside]\n if locator is None:\n locator = ticker.FixedLocator(b, nbins=10)\n else:\n if locator is None:\n locator = self.long_axis.get_major_locator()\n if minorlocator is None:\n minorlocator = self.long_axis.get_minor_locator()\n if minorlocator is None:\n minorlocator = ticker.NullLocator()\n if formatter is None:\n formatter = self.long_axis.get_major_formatter()\n self._locator = locator\n self._formatter = formatter\n self._minorlocator = minorlocator\n _log.debug('locator: %r', locator)", + "docstring": "Return the `` of the colorbar. If they have not been defined (i.e. are *None*), the formatter and locator are retrieved from the axis, or from the value of the boundaries for a boundary norm. Called by update_ticks...", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", + "ast_data": "FunctionDef name:_get_ticker_locator_formatter arg:self arguments arg Assign Assign Assign If Call Assign If Compare Assign Call If Compare Assign Call If Call If Compare Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Assign Assign Call" + }, + { + "library": "pytorch", + "name": "is_enabled", + "source_code": "def is_enabled() -> bool:\n return torch._C._cuda_tunableop_is_enabled()", + "docstring": "Returns whether the TunableOp feature is enabled.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\tunable.py", + "ast_data": "FunctionDef name:is_enabled arguments Return return:yes Call" + }, + { + "library": "numpy", + "name": "_get_num_chars", + "source_code": "def _get_num_chars(a):\n if issubclass(a.dtype.type, np.str_):\n return a.itemsize // 4\n return a.itemsize", + "docstring": "Helper function that returns the number of characters per field in a string or unicode array. 
This is to abstract out the fact that for a unicode array this is itemsize / 4.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\strings.py", + "ast_data": "FunctionDef name:_get_num_chars arg:a arguments arg If Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_IntegerGreaterThan", + "source_code": "class _IntegerGreaterThan(Constraint):\n is_discrete = True\n\n def __init__(self, lower_bound):\n self.lower_bound = lower_bound\n super().__init__()\n\n def check(self, value):\n return (value % 1 == 0) & (value >= self.lower_bound)\n\n def __repr__(self):\n fmt_string = self.__class__.__name__[1:]\n fmt_string += f'(lower_bound={self.lower_bound})'\n return fmt_string", + "docstring": "Constrain to an integer interval .", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:_IntegerGreaterThan Assign FunctionDef name:__init__ arg:self arg:lower_bound arguments arg arg Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes" + }, + { + "library": "django", + "name": "pbkdf2", + "source_code": "def pbkdf2(password, salt, iterations, dklen=0, digest=None):\n if digest is None:\n digest = hashlib.sha256\n dklen = dklen or None\n password = force_bytes(password)\n salt = force_bytes(salt)\n return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)", + "docstring": "Return the hash of password using pbkdf2.", + "type": "function", + "file_path": "django\\django\\utils\\crypto.py", + "ast_data": "FunctionDef name:pbkdf2 arg:password arg:salt arg:iterations arg:dklen arg:digest arguments arg arg arg arg arg If Compare Assign Assign BoolOp Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scrapy", + "name": "open_in_browser", + "source_code": "def open_in_browser(response: TextResponse, _openfunc: Callable[[str], Any]=webbrowser.open) -> Any:\n from scrapy.http import HtmlResponse, TextResponse\n body = response.body\n if isinstance(response, HtmlResponse):\n if b''\n body = re.sub(b']*?>)', to_bytes(repl), body, count=1)\n ext = '.html'\n elif isinstance(response, TextResponse):\n ext = '.txt'\n else:\n raise TypeError(f'Unsupported response type: {response.__class__.__name__}')\n fd, fname = tempfile.mkstemp(ext)\n os.write(fd, body)\n os.close(fd)\n return _openfunc(f'file://{fname}')", + "docstring": "Open *response* in a local web browser, adjusting the _ for external links to work, e.g. so that images and styles are displayed. .. _base tag: For example: .. code-block:: python from scrapy.utils.response import open_in_browser def parse_details(self, response): if \"item name\" not in response.body: open_in_browser(response)", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\response.py", + "ast_data": "FunctionDef name:open_in_browser arg:response arg:_openfunc arguments arg arg Assign If Call If Compare Call Assign Assign Call Call Assign If Call Assign Raise Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "in_y_true_range", + "source_code": "def in_y_true_range(self, y):\n return self.interval_y_true.includes(y)", + "docstring": "Return True if y is in the valid range of y_true. 
Parameters ---------- y : ndarray", + "type": "method", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "FunctionDef name:in_y_true_range arg:self arg:y arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_to_replicate_tensor", + "source_code": "def _to_replicate_tensor(self, local_tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int, current_logical_shape: list[int]) -> torch.Tensor:\n num_chunks = mesh.size(mesh_dim=mesh_dim)\n logical_dim_size = current_logical_shape[self.dim]\n is_padded = logical_dim_size % num_chunks != 0\n if is_padded:\n full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks\n pad_size = full_chunk_size - local_tensor.size(self.dim)\n local_tensor = pad_tensor(local_tensor, self.dim, pad_size)\n if not local_tensor.is_contiguous():\n local_tensor = local_tensor.contiguous()\n result = funcol.all_gather_tensor(local_tensor, gather_dim=self.dim, group=(mesh, mesh_dim))\n if is_padded:\n unpad_size = full_chunk_size * num_chunks - logical_dim_size\n result = unpad_tensor(result, self.dim, unpad_size)\n return result", + "docstring": "This function all_gather all shards and return a tensor that is replicated on the previously sharded mesh dimension", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py", + "ast_data": "FunctionDef name:_to_replicate_tensor arg:self arg:local_tensor arg:mesh arg:mesh_dim arg:current_logical_shape arguments arg arg arg arg arg Assign Call Assign Assign Compare If Assign Assign Call Assign Call If Call Assign Call Assign Call If Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_should_save_on_batch", + "source_code": "def _should_save_on_batch(self, batch):\n if self.save_freq == 'epoch':\n return False\n if batch <= self._last_batch_seen:\n add_batches = batch + 1\n else:\n add_batches = batch - self._last_batch_seen\n self._batches_seen_since_last_saving += add_batches\n self._last_batch_seen = batch\n if self._batches_seen_since_last_saving >= self.save_freq:\n self._batches_seen_since_last_saving = 0\n return True\n return False", + "docstring": "Handles batch-level saving logic, supports steps_per_execution.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:_should_save_on_batch arg:self arg:batch arguments arg arg If Compare Return return:yes If Compare Assign Assign Assign If Compare Assign Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "HandlerLine2D", + "source_code": "class HandlerLine2D(HandlerNpoints):\n\n def create_artists(self, legend, orig_handle, xdescent, ydescent, width, height, fontsize, trans):\n xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent, width, height, fontsize)\n markevery = None\n if self.get_numpoints(legend) == 1:\n xdata = np.linspace(xdata[0], xdata[-1], 3)\n markevery = [1]\n ydata = np.full_like(xdata, (height - ydescent) / 2)\n legline = Line2D(xdata, ydata, markevery=markevery)\n self.update_prop(legline, orig_handle, legend)\n if legend.markerscale != 1:\n newsz = legline.get_markersize() * legend.markerscale\n legline.set_markersize(newsz)\n legline.set_transform(trans)\n return [legline]", + "docstring": "Handler for instances. 
See Also -------- HandlerLine2DCompound : An earlier handler implementation, which used one artist for the line and another for the marker(s).", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py", + "ast_data": "ClassDef name:HandlerLine2D FunctionDef name:create_artists arg:self arg:legend arg:orig_handle arg:xdescent arg:ydescent arg:width arg:height arg:fontsize arg:trans arguments arg arg arg arg arg arg arg arg arg Assign Call Assign If Compare Call Assign Call Assign Assign Call Assign Call Call If Compare Assign Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "dimpulse", + "source_code": "def dimpulse(system, x0=None, t=None, n=None):\n if isinstance(system, dlti):\n system = system._as_ss()\n elif isinstance(system, lti):\n raise AttributeError('dimpulse can only be used with discrete-time dlti systems.')\n else:\n system = dlti(*system[:-1], dt=system[-1])._as_ss()\n if n is None:\n n = 100\n if t is None:\n t = np.linspace(0, n * system.dt, n, endpoint=False)\n else:\n t = np.asarray(t)\n yout = None\n for i in range(0, system.inputs):\n u = np.zeros((t.shape[0], system.inputs))\n u[0, i] = 1.0\n one_output = dlsim(system, u, t=t, x0=x0)\n if yout is None:\n yout = (one_output[1],)\n else:\n yout = yout + (one_output[1],)\n tout = one_output[0]\n return (tout, yout)", + "docstring": "Impulse response of discrete-time system. Parameters ---------- system : dlti | tuple An instance of the LTI class or a tuple describing the system. The number of elements in the tuple determine the interpretation. I.e.: * `dltiTransferFunctionZerosPolesGainStateSpaceTransferFunctionZerosPolesGainStateSpacet` is not given). Returns ------- tout : ndarray Time values for the output, as a 1-D array. yout : tuple of ndarray Impulse response of system. Each element of the tuple represents the output of the system based on an impulse in each input. See Also -------- impulse, dstep, dlsim, cont2discrete Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt ... >>> dt = 1 # sampling interval is one => time unit is sample number >>> bb, aa = signal.butter(3, 0.25, fs=1/dt) >>> t, y = signal.dimpulse((bb, aa, dt), n=25) ... 
>>> fig0, ax0 = plt.subplots() >>> ax0.step(t, np.squeeze(y), '.-', where='post') >>> ax0.set_title(r\"Impulse Response of a $3^\\text{rd}$ Order Butterworth Filter\") >>> ax0.set(xlabel='Sample number', ylabel='Amplitude') >>> ax0.grid() >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:dimpulse arg:system arg:x0 arg:t arg:n arguments arg arg arg arg If Call Assign Call If Call Raise Call Assign Call Call If Compare Assign If Compare Assign Call Assign Call Assign For Call Assign Call Assign Assign Call If Compare Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "create_minified_hlo_graph", + "source_code": "def create_minified_hlo_graph(minified_fx_graph, inputs):\n hlo_dir = f'{os.getcwd()}/hlo_files'\n os.makedirs(hlo_dir, exist_ok=True)\n from torch_xla.stablehlo import save_torch_model_as_stablehlo\n save_torch_model_as_stablehlo(minified_fx_graph, inputs, hlo_dir)", + "docstring": "Takes minified FX graph as primary input, and ports it to HLO via StableHLO. Provides minified HLO graph as output, and archives them to a local directory", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\fx_minifier.py", + "ast_data": "FunctionDef name:create_minified_hlo_graph arg:minified_fx_graph arg:inputs arguments arg arg Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "update_renames_v2", + "source_code": "def update_renames_v2(output_file_path):\n function_renames = collect_function_renames()\n constant_renames = collect_constant_renames()\n all_renames = function_renames.union(constant_renames)\n manual_renames = all_renames_v2.manual_symbol_renames\n rename_lines = [get_rename_line(name, canonical_name) for name, canonical_name in all_renames if 'tf.' + name not in manual_renames]\n renames_file_text = '%srenames = {\\n%s\\n}\\n' % (_FILE_HEADER, ',\\n'.join(sorted(rename_lines)))\n file_io.write_string_to_file(output_file_path, renames_file_text)", + "docstring": "Writes a Python dictionary mapping deprecated to canonical API names. Args: output_file_path: File path to write output to.
Any existing contents would be replaced.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_renames_map.py", + "ast_data": "FunctionDef name:update_renames_v2 arg:output_file_path arguments arg Assign Call Assign Call Assign Call Assign Assign Call Compare Assign Call Call Call" + }, + { + "library": "django", + "name": "construct_relative_path", + "source_code": "def construct_relative_path(current_template_name, relative_name, allow_recursion=False):\n new_name = relative_name.strip('\\'\"')\n if not new_name.startswith(('./', '../')):\n return relative_name\n if current_template_name is None:\n raise TemplateSyntaxError(f'The relative path {relative_name} cannot be evaluated due to an unknown template origin.')\n new_name = posixpath.normpath(posixpath.join(posixpath.dirname(current_template_name.lstrip('/')), new_name))\n if new_name.startswith('../'):\n raise TemplateSyntaxError(\"The relative path '%s' points outside the file hierarchy that template '%s' is in.\" % (relative_name, current_template_name))\n if not allow_recursion and current_template_name.lstrip('/') == new_name:\n raise TemplateSyntaxError(\"The relative path '%s' was translated to template name '%s', the same template in which the tag appears.\" % (relative_name, current_template_name))\n has_quotes = relative_name.startswith(('\"', \"'\")) and relative_name[0] == relative_name[-1]\n return f'\"{new_name}\"' if has_quotes else new_name", + "docstring": "Convert a relative path (starting with './' or '../') to the full template name based on the current_template_name.", + "type": "function", + "file_path": "django\\django\\template\\loader_tags.py", + "ast_data": "FunctionDef name:construct_relative_path arg:current_template_name arg:relative_name arg:allow_recursion arguments arg arg arg Assign Call If Call Return return:yes If Compare Raise Call Assign Call Call Call Call If Call Raise Call If BoolOp Compare Call Raise Call Assign BoolOp Call Compare Return return:yes" + }, + { + "library": "matplotlib", + "name": "_format_maybe_minus_and_locale", + "source_code": "def _format_maybe_minus_and_locale(self, fmt, arg):\n return self.fix_minus((','.join((locale.format_string(part, (arg,), True).replace(',', '{,}') for part in fmt.split(','))) if self._useMathText else locale.format_string(fmt, (arg,), True)) if self._useLocale else fmt % arg)", + "docstring": "Format *arg* with *fmt*, applying Unicode minus and locale if desired.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:_format_maybe_minus_and_locale arg:self arg:fmt arg:arg arguments arg arg arg Return return:yes Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "coordinatewise_monotone_map", + "source_code": "@classmethod\ndef coordinatewise_monotone_map(cls, x, y, fn):\n x, y = (cls.wrap(x), cls.wrap(y))\n products = [fn(a, b) for a, b in itertools.product([x.lower, x.upper], [y.lower, y.upper])]\n return ValueRanges(min(products), max(products))", + "docstring": "It's increasing or decreasing on each coordinate.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py", + "ast_data": "FunctionDef name:coordinatewise_monotone_map arg:cls arg:x arg:y arg:fn arguments arg arg arg arg Assign Call Call Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "to_string", + "source_code": "def to_string(self):\n return self._as_string", + "docstring": 
"Return a string representation of this . Returns: a string of the form /job:/replica:/task:/device::.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py", + "ast_data": "FunctionDef name:to_string arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "right", + "source_code": "@property\ndef right(self) -> Index:\n from pandas import Index\n return Index(self._right, copy=False)", + "docstring": "Return the right endpoints of each Interval in the IntervalArray as an Index. This property extracts the right endpoints from each interval contained within the IntervalArray. This can be helpful in use cases where you need to work with or compare only the upper bounds of intervals, such as when performing range-based filtering, determining interval overlaps, or visualizing the end boundaries of data segments. See Also -------- arrays.IntervalArray.left : Return the left endpoints of each Interval in the IntervalArray as an Index. arrays.IntervalArray.mid : Return the midpoint of each Interval in the IntervalArray as an Index. arrays.IntervalArray.contains : Check elementwise if the Intervals contain the value. Examples -------- >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)]) >>> interv_arr [(0, 1], (2, 5]] Length: 2, dtype: interval[int64, right] >>> interv_arr.right Index([1, 5], dtype='int64')", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\interval.py", + "ast_data": "FunctionDef name:right arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "_check_save_as", + "source_code": "def _check_save_as(self, obj):\n if not isinstance(obj.save_as, bool):\n return must_be('a boolean', option='save_as', obj=obj, id='admin.E101')\n else:\n return []", + "docstring": "Check save_as is a boolean.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_save_as arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no" + }, + { + "library": "matplotlib", + "name": "set_theta_direction", + "source_code": "def set_theta_direction(self, direction):\n mtx = self._direction.get_matrix()\n if direction in ('clockwise', -1):\n mtx[0, 0] = -1\n elif direction in ('counterclockwise', 'anticlockwise', 1):\n mtx[0, 0] = 1\n else:\n _api.check_in_list([-1, 1, 'clockwise', 'counterclockwise', 'anticlockwise'], direction=direction)\n self._direction.invalidate()", + "docstring": "Set the direction in which theta increases. clockwise, -1: Theta increases in the clockwise direction counterclockwise, anticlockwise, 1: Theta increases in the counterclockwise direction", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", + "ast_data": "FunctionDef name:set_theta_direction arg:self arg:direction arguments arg arg Assign Call If Compare Assign If Compare Assign Call Call" + }, + { + "library": "numpy", + "name": "_leading_trailing", + "source_code": "def _leading_trailing(a, edgeitems, index=()):\n axis = len(index)\n if axis == a.ndim:\n return a[index]\n if a.shape[axis] > 2 * edgeitems:\n return concatenate((_leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]), _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])), axis=axis)\n else:\n return _leading_trailing(a, edgeitems, index + np.index_exp[:])", + "docstring": "Keep only the N-D corners (leading and trailing edges) of an array. 
Should be passed a base-class ndarray, since it makes no guarantees about preserving subclasses.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\arrayprint.py", + "ast_data": "FunctionDef name:_leading_trailing arg:a arg:edgeitems arg:index arguments arg arg arg Assign Call If Compare Return return:yes If Compare Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "environment", + "source_code": "@property\ndef environment(self):\n return ''", + "docstring": "Returns the current environment which TensorFlow is running in.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py", + "ast_data": "FunctionDef name:environment arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "riccati_jn", + "source_code": "def riccati_jn(n, x):\n if not (isscalar(n) and isscalar(x)):\n raise ValueError('arguments must be scalars.')\n n = _nonneg_int_or_fail(n, 'n', strict=False)\n if n == 0:\n n1 = 1\n else:\n n1 = n\n jn = np.empty((n1 + 1,), dtype=np.float64)\n jnp = np.empty_like(jn)\n _rctj(x, out=(jn, jnp))\n return (jn[:n + 1], jnp[:n + 1])", + "docstring": "Compute Ricatti-Bessel function of the first kind and its derivative. The Ricatti-Bessel function of the first kind is defined as :math:, where :math: is the spherical Bessel function of the first kind of order :math:. This function computes the value and first derivative of the Ricatti-Bessel function for all orders up to and including . Parameters ---------- n : int Maximum order of function to compute x : float Argument at which to evaluate Returns ------- jn : ndarray Value of j0(x), ..., jn(x) jnp : ndarray First derivative j0'(x), ..., jn'(x) Notes ----- The computation is carried out via backward recurrence, using the relation DLMF 10.51.1 [2]_. Wrapper for a Fortran routine created by Shanjie Zhang and Jianming Jin [1]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996. .. 
[2] NIST Digital Library of Mathematical Functions.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:riccati_jn arg:n arg:x arguments arg arg If BoolOp Call Call Raise Call Assign Call If Compare Assign Assign Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "compute_buffer_groups", + "source_code": "def compute_buffer_groups(self, lines):\n name_to_group = {}\n for line in lines:\n if isinstance(line, AllocateLine):\n name = line.node.get_name()\n assert name not in name_to_group\n name_to_group[name] = BufferGroup(line.node)\n elif isinstance(line, ReuseLine):\n old_name = line.node.get_name()\n new_name = line.reused_as.get_name()\n assert new_name not in name_to_group\n if old_name in name_to_group:\n name_to_group[old_name].names.append(new_name)\n name_to_group[new_name] = name_to_group[old_name]\n outputs = OrderedSet(V.graph.get_output_names())\n unique_groups = [*{id(g): g for g in name_to_group.values()}.values()]\n for group in unique_groups:\n group.is_output = any((x in outputs for x in group.names))\n assert self.buffer_groups is None\n self.buffer_groups = unique_groups\n return name_to_group", + "docstring": "Populates self.buffer_groups with BufferGroup objects that join allocations with common storage (due to inplace reuse) into a single object.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py", + "ast_data": "FunctionDef name:compute_buffer_groups arg:self arg:lines arguments arg arg Assign For If Call Assign Call Compare Assign Call If Call Assign Call Assign Call Compare If Compare Call Assign Assign Call Call Assign Call Call Call For Assign Call Compare Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_record_and_ignore_transient_ps_failure", + "source_code": "def _record_and_ignore_transient_ps_failure(self, e):\n if self._transient_ps_failures_threshold <= 0 or not _is_ps_failure(e):\n return False\n ps_tasks = _extract_failed_ps_instances(str(e))\n with self._potential_ps_failures_lock:\n for t in ps_tasks:\n self._potential_ps_failures_count[t] += 1\n if self._potential_ps_failures_count[t] >= self._transient_ps_failures_threshold:\n return False\n return True", + "docstring": "Records potential PS failures and return if failure should be ignored.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:_record_and_ignore_transient_ps_failure arg:self arg:e arguments arg arg If BoolOp Compare Call Return return:yes Assign Call Call With For If Compare Return return:yes Return return:yes" + }, + { + "library": "scrapy", + "name": "close", + "source_code": "def close(self) -> None:\n self.head_plugin.close()", + "docstring": "Close the target file along with all the plugins.", + "type": "method", + "file_path": "scrapy\\scrapy\\extensions\\postprocessing.py", + "ast_data": "FunctionDef name:close arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "tpu_core_ids_to_locations", + "source_code": "def tpu_core_ids_to_locations(self, tpu_core_ids):\n return _pywrap_dtensor_device.TPUCoreIDsToLocations(context.context()._handle, self._device_info, tpu_core_ids)", + "docstring": "Translates TPU core IDs to TPU core locations. Args: tpu_core_ids: A list of TPU core IDs. Each one is an unsigned integer. 
Returns: A list of corresponding TPU core locations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py", + "ast_data": "FunctionDef name:tpu_core_ids_to_locations arg:self arg:tpu_core_ids arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_all_gather_base", + "source_code": "@_exception_logger\n@deprecated('`torch.distributed._all_gather_base` is a private function and will be deprecated. Please use `torch.distributed.all_gather_into_tensor` instead.', category=FutureWarning)\ndef _all_gather_base(output_tensor, input_tensor, group=None, async_op=False):\n return all_gather_into_tensor(output_tensor, input_tensor, group, async_op)", + "docstring": "Single tensor all gather. Gathers a single tensor from all ranks, and puts them in a single output tensor. Args: output_tensor (Tensor): Output tensor. It should contain correctly-sized tensors to be used for output of the collective. input_tensor (Tensor): Tensor to be broadcast from current process. group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. async_op (bool, optional): Whether this op should be an async op Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group .. warning:: is a private function. Users should use instead.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:_all_gather_base arg:output_tensor arg:input_tensor arg:group arg:async_op arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "time_and_count", + "source_code": "def time_and_count(fn: Callable[Concatenate[Any, P], T]) -> Callable[Concatenate[Any, P], T]:\n\n @wraps(fn)\n def wrapper(self: Any, *args: P.args, **kwargs: P.kwargs) -> T:\n fn_qual_name = f'{self.__class__.__name__}.{fn.__name__}'\n counters['inductor'][f'benchmarking.{fn_qual_name}'] += 1\n with dynamo_timed(fn_qual_name, log_pt2_compile_event=False):\n return fn(self, *args, **kwargs)\n return wrapper", + "docstring": "Wraps with context, and increments the appropriate dynamo counters. It is expected that is a method of or one of its subclasses; typing limitations prevent us from declaring this directly.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py", + "ast_data": "FunctionDef name:time_and_count arg:fn arguments arg FunctionDef name:wrapper arg:self arguments arg arg arg Assign With Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_make_numeric_only", + "source_code": "def _make_numeric_only(self, obj: NDFrameT) -> NDFrameT:\n result = obj.select_dtypes(include=['number'], exclude=['timedelta'])\n return result", + "docstring": "Subset DataFrame to numeric columns. 
Parameters ---------- obj : DataFrame Returns ------- obj subset to numeric-only columns.", + "type": "method", + "file_path": "pandas\\pandas\\core\\window\\rolling.py", + "ast_data": "FunctionDef name:_make_numeric_only arg:self arg:obj arguments arg arg Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_plugin_asset", + "source_code": "def get_plugin_asset(plugin_asset_cls, graph=None):\n if graph is None:\n graph = ops.get_default_graph()\n if not plugin_asset_cls.plugin_name:\n raise ValueError('Class %s has no plugin_name' % plugin_asset_cls.__name__)\n name = _PLUGIN_ASSET_PREFIX + plugin_asset_cls.plugin_name\n container = graph.get_collection(name)\n if container:\n if len(container) != 1:\n raise ValueError('Collection for %s had %d items, expected 1' % (name, len(container)))\n instance = container[0]\n if not isinstance(instance, plugin_asset_cls):\n raise ValueError('Plugin name collision between classes %s and %s' % (plugin_asset_cls.__name__, instance.__class__.__name__))\n else:\n instance = plugin_asset_cls()\n graph.add_to_collection(name, instance)\n graph.add_to_collection(_PLUGIN_ASSET_PREFIX, plugin_asset_cls.plugin_name)\n return instance", + "docstring": "Acquire singleton PluginAsset instance from a graph. PluginAssets are always singletons, and are stored in tf Graph collections. This way, they can be defined anywhere the graph is being constructed, and if the same plugin is configured at many different points, the user can always modify the same instance. Args: plugin_asset_cls: The PluginAsset class graph: (optional) The graph to retrieve the instance from. If not specified, the default graph is used. Returns: An instance of the plugin_asset_class Raises: ValueError: If we have a plugin name collision, or if we unexpectedly find the wrong number of items in a collection.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\summary\\plugin_asset.py", + "ast_data": "FunctionDef name:get_plugin_asset arg:plugin_asset_cls arg:graph arguments arg arg If Compare Assign Call If Raise Call Assign Assign Call If If Compare Call Raise Call Call Assign If Call Raise Call Assign Call Call Call Return return:yes" + }, + { + "library": "uvicorn", + "name": "shutdown", + "source_code": "def shutdown(self) -> None:\n if self.cycle is None or self.cycle.response_complete:\n event = h11.ConnectionClosed()\n self.conn.send(event)\n self.transport.close()\n else:\n self.cycle.keep_alive = False", + "docstring": "Called by the server to commence a graceful shutdown.", + "type": "method", + "file_path": "uvicorn\\uvicorn\\protocols\\http\\h11_impl.py", + "ast_data": "FunctionDef name:shutdown arg:self arguments arg If BoolOp Compare Assign Call Call Call Assign" + }, + { + "library": "django", + "name": "resolve_expression", + "source_code": "def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):\n c = self.copy()\n c.is_summary = summarize\n source_expressions = [expr.resolve_expression(query, allow_joins, reuse, summarize) if expr is not None else None for expr in c.get_source_expressions()]\n if not self.allows_composite_expressions and any((isinstance(expr, ColPairs) for expr in source_expressions)):\n raise ValueError(f'{self.__class__.__name__} expression does not support composite primary keys.')\n c.set_source_expressions(source_expressions)\n return c", + "docstring": "Provide the chance to do any preprocessing or validation before being added to the query. 
Arguments: * query: the backend query implementation * allow_joins: boolean allowing or denying use of joins in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause * for_save: whether this expression about to be used in a save or update Return: an Expression to be added to the query.", + "type": "method", + "file_path": "django\\django\\db\\models\\expressions.py", + "ast_data": "FunctionDef name:resolve_expression arg:self arg:query arg:allow_joins arg:reuse arg:summarize arg:for_save arguments arg arg arg arg arg arg Assign Call Assign Assign Compare Call Call If BoolOp Call Call Raise Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "generate_dequeue_op", + "source_code": "def generate_dequeue_op(self, tpu_device=0):\n self.freeze()\n if self._generated_dequeue_op and (not ops.inside_function()):\n raise ValueError(\"Can't generate two dequeue Ops from the same queue\")\n self._generated_dequeue_op = True\n full_name = '%s/dequeue' % self._name\n sharded_shapes = [policy.get_unpartitioned_shape(policy.get_sharded_shape(shape)) for shape, policy in zip(self._tuple_shapes, self._sharding_policies)]\n if tpu_device is not None:\n with ops.device(tpu_name_util.core(tpu_device)):\n dequeue_op = tpu_ops.infeed_dequeue_tuple(dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)\n else:\n dequeue_op = tpu_ops.infeed_dequeue_tuple(dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)\n if self._number_of_partitions <= 1:\n return dequeue_op\n partitions = [policy.get_unpartitioned_shape([1] * shape.ndims).as_list() for shape, policy in zip(self._tuple_shapes, self._sharding_policies)]\n return tag_sharding_attribute_for_dequeued_tensors(dequeue_op, partitions)", + "docstring": "Generates the device-side Op to dequeue a tuple from the queue. Implicitly freezes the queue configuration if it is not already frozen, which will raise errors if the shapes and types have not been fully specified. Args: tpu_device: The TPU device ordinal where the infeed instruction should be placed. If None, no explicit placement will be performed, and it is up to the user to call this API from within a proper TPU device scope. The XLA code will fail if the TPU dequeue instruction is not bound to any device. Returns: A list of Outputs corresponding to a shard of infeed dequeued into XLA, suitable for use within a replicated block. 
Raises: ValueError: if the types or shapes of the tuple elements have not been set; or if a dequeue op has already been generated.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", + "ast_data": "FunctionDef name:generate_dequeue_op arg:self arg:tpu_device arguments arg arg Call If BoolOp Call Raise Call Assign Assign Assign Call Call Call If Compare With Call Call Assign Call Assign Call If Compare Return return:yes Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_copy_trackable_to_cpu", + "source_code": "def _copy_trackable_to_cpu(self, object_map):\n if self not in object_map:\n op_device = pydev.DeviceSpec.from_string(self.device).replace(device_type='CPU', device_index=0).to_string()\n with ops.device(op_device):\n new_var = UninitializedVariable(trainable=self.trainable, shape=self.shape, dtype=self.dtype, name=self._shared_name)\n object_map[self] = new_var\n destination_var = object_map[self]\n with ops.device(destination_var.device):\n destination_var.assign(self.read_value())", + "docstring": "For implementing .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare Assign Call Call Call With Call Assign Call Assign Assign With Call Call Call" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "def predict_proba(self, X, check_input=True):\n check_is_fitted(self)\n X = self._validate_X_predict(X, check_input)\n proba = self.tree_.predict(X)\n if self.n_outputs_ == 1:\n return proba[:, :self.n_classes_]\n else:\n all_proba = []\n for k in range(self.n_outputs_):\n proba_k = proba[:, k, :self.n_classes_[k]]\n all_proba.append(proba_k)\n return all_proba", + "docstring": "Predict class probabilities of the input samples X. The predicted class probability is the fraction of samples of the same class in a leaf. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. 
Internally, it will be converted to `classes_`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\tree\\_classes.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arg:check_input arguments arg arg arg Call Assign Call Assign Call If Compare Return return:yes Assign For Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "register_graph_pattern", + "source_code": "def register_graph_pattern(pattern: PatternExpr, extra_check: Callable[[Match], bool]=_return_true, *, pass_dict: _PassDictsType, prepend: bool=False) -> Callable[[Callable[..., Any]], Callable[..., Any]]:\n\n def decorator(handler: Callable[..., Any]) -> Callable[..., Any]:\n assert callable(handler)\n GraphPatternEntry(pattern=pattern, extra_check=extra_check, handler=handler).register(pass_dict, prepend=prepend)\n return handler\n return decorator", + "docstring": "Register a pattern that runs a function on the FX graph, allowing custom transformation code.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py", + "ast_data": "FunctionDef name:register_graph_pattern arg:pattern arg:extra_check arguments arg arg arg arg FunctionDef name:decorator arg:handler arguments arg Call Call Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "guarantee_const", + "source_code": "@tf_export('guarantee_const')\n@deprecation.deprecated(None, 'Not for public use.')\ndef guarantee_const(input, name=None):\n return gen_array_ops.guarantee_const(input=input, name=name)", + "docstring": "Promise to the TF runtime that the input tensor is a constant. The runtime is then free to make optimizations based on this. Returns the input tensor without modification. Args: input: A . name: A name for this operation. Returns: A . Has the same dtype as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:guarantee_const arg:input arg:name arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "convert_variables_to_tensors", + "source_code": "def convert_variables_to_tensors(values):\n\n def _convert_resource_variable_to_tensor(x):\n if _pywrap_utils.IsResourceVariable(x):\n return ops.convert_to_tensor(x)\n elif isinstance(x, composite_tensor.CompositeTensor):\n return composite_tensor.convert_variables_to_tensors(x)\n else:\n return x\n return nest.map_structure(_convert_resource_variable_to_tensor, values)", + "docstring": "Converts s in to s. If an object is a and overrides its method, its components will also be converted to s. Objects other than s in will be returned unchanged. Args: values: A nested structure of s, or any other objects. Returns: A new structure with s in converted to s.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\variable_utils.py", + "ast_data": "FunctionDef name:convert_variables_to_tensors arg:values arguments arg FunctionDef name:_convert_resource_variable_to_tensor arg:x arguments arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "M", + "source_code": "def M(self):\n return MONTHS_3[self.data.month].title()", + "docstring": "Month, textual, 3 letters; e.g. 
'Jan'", + "type": "method", + "file_path": "django\\django\\utils\\dateformat.py", + "ast_data": "FunctionDef name:M arg:self arguments arg Return return:yes Call" + }, + { + "library": "scrapy", + "name": "parse_node", + "source_code": "def parse_node(self, response: Response, selector: Selector) -> Any:\n if hasattr(self, 'parse_item'):\n return self.parse_item(response, selector)\n raise NotImplementedError", + "docstring": "This method must be overridden with your custom spider functionality", + "type": "method", + "file_path": "scrapy\\scrapy\\spiders\\feed.py", + "ast_data": "FunctionDef name:parse_node arg:self arg:response arg:selector arguments arg arg arg If Call Return return:yes Call Raise" + }, + { + "library": "numpy", + "name": "__generator_ctor", + "source_code": "def __generator_ctor(bit_generator_name='MT19937', bit_generator_ctor=__bit_generator_ctor):\n if isinstance(bit_generator_name, BitGenerator):\n return Generator(bit_generator_name)\n return Generator(bit_generator_ctor(bit_generator_name))", + "docstring": "Pickling helper function that returns a Generator object Parameters ---------- bit_generator_name : str or BitGenerator String containing the core BitGenerator's name or a BitGenerator instance bit_generator_ctor : callable, optional Callable function that takes bit_generator_name as its only argument and returns an instantized bit generator. Returns ------- rg : Generator Generator using the named core BitGenerator", + "type": "function", + "file_path": "numpy\\numpy\\random\\_pickle.py", + "ast_data": "FunctionDef name:__generator_ctor arg:bit_generator_name arg:bit_generator_ctor arguments arg arg If Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "constant", + "source_code": "def constant(self, value: Union[bool, float, int], dtype: torch.dtype) -> T:\n raise NotImplementedError", + "docstring": "Produces a scalar constant of type dtype.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:constant arg:self arg:value arg:dtype arguments arg arg arg Raise" + }, + { + "library": "scipy", + "name": "diff", + "source_code": "def diff(x, order=1, period=None, _cache=_cache):\n if isinstance(_cache, threading.local):\n if not hasattr(_cache, 'diff_cache'):\n _cache.diff_cache = {}\n _cache = _cache.diff_cache\n tmp = asarray(x)\n if order == 0:\n return tmp\n if iscomplexobj(tmp):\n return diff(tmp.real, order, period, _cache) + 1j * diff(tmp.imag, order, period, _cache)\n if period is not None:\n c = 2 * pi / period\n else:\n c = 1.0\n n = len(x)\n omega = _cache.get((n, order, c))\n if omega is None:\n if len(_cache) > 20:\n while _cache:\n _cache.popitem()\n\n def kernel(k, order=order, c=c):\n if k:\n return pow(c * k, order)\n return 0\n omega = convolve.init_convolution_kernel(n, kernel, d=order, zero_nyquist=1)\n _cache[n, order, c] = omega\n overwrite_x = _datacopied(tmp, x)\n return convolve.convolve(tmp, omega, swap_real_imag=order % 2, overwrite_x=overwrite_x)", + "docstring": "Return kth derivative (or integral) of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j y_0 = 0 if order is not 0. Parameters ---------- x : array_like Input array. order : int, optional The order of differentiation. Default order is 1. 
If order is negative, then integration is carried out under the assumption that ``, the Nyquist mode is taken zero.", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py", + "ast_data": "FunctionDef name:diff arg:x arg:order arg:period arg:_cache arguments arg arg arg arg If Call If Call Assign Assign Assign Call If Compare Return return:yes If Call Return return:yes Call Call If Compare Assign Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel arg:k arg:order arg:c arguments arg arg arg If Return return:yes Call Return return:yes Assign Call Assign Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, coef_init=None, intercept_init=None):\n self._more_validate_params()\n lr = 'pa1' if self.loss == 'epsilon_insensitive' else 'pa2'\n return self._fit(X, y, alpha=1.0, C=self.C, loss='epsilon_insensitive', learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init)", + "docstring": "Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : numpy array of shape [n_samples] Target values. coef_init : array, shape = [n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [1] The initial intercept to warm-start the optimization. Returns ------- self : object Fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_passive_aggressive.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:coef_init arg:intercept_init arguments arg arg arg arg arg Call Assign Compare Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "generate_value_label", + "source_code": "def generate_value_label(self, byteorder: str) -> bytes:\n encoding = self._encoding\n bio = BytesIO()\n null_byte = b'\\x00'\n bio.write(struct.pack(byteorder + 'i', self.len))\n labname = str(self.labname)[:32].encode(encoding)\n lab_len = 32 if encoding not in ('utf-8', 'utf8') else 128\n labname = _pad_bytes(labname, lab_len + 1)\n bio.write(labname)\n for i in range(3):\n bio.write(struct.pack('c', null_byte))\n bio.write(struct.pack(byteorder + 'i', self.n))\n bio.write(struct.pack(byteorder + 'i', self.text_len))\n for offset in self.off:\n bio.write(struct.pack(byteorder + 'i', offset))\n for value in self.val:\n bio.write(struct.pack(byteorder + 'i', value))\n for text in self.txt:\n bio.write(text + null_byte)\n return bio.getvalue()", + "docstring": "Generate the binary representation of the value labels. 
Parameters ---------- byteorder : str Byte order of the output Returns ------- value_label : bytes Bytes containing the formatted value label", + "type": "method", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:generate_value_label arg:self arg:byteorder arguments arg arg Assign Assign Call Assign Call Call Assign Call Call Assign Compare Assign Call Call For Call Call Call Call Call Call Call For Call Call For Call Call For Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "Ackley03", + "source_code": "class Ackley03(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-32.0] * self.N, [32.0] * self.N))\n self.global_optimum = [[-0.68255758, -0.36070859]]\n self.fglob = -195.6290282592388\n\n def fun(self, x, *args):\n self.nfev += 1\n a = -200 * exp(-0.02 * sqrt(x[0] ** 2 + x[1] ** 2))\n a += 5 * exp(cos(3 * x[0]) + sin(3 * x[1]))\n return a", + "docstring": "Ackley03 [1]_ objective function. The Ackley03 global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ackley03}}(x) = -200 e^{-0.02 \\sqrt{x_1^2 + x_2^2}} + 5e^{\\cos(3x_1) + \\sin(3x_2)} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: I think the minus sign is missing in front of the first term in eqn3 in [1]_. This changes the global minimum", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_A.py", + "ast_data": "ClassDef name:Ackley03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "unpackage_script_module", + "source_code": "def unpackage_script_module(importer: PackageImporter, script_module_id: str) -> torch.nn.Module:\n if not isinstance(importer.zip_reader, torch._C.PyTorchFileReader):\n raise RuntimeError('Loading ScriptObjects from a PackageImporter created from a directory is not supported. Use a package archive file instead.')\n cu = torch._C.CompilationUnit()\n cpp_module = torch._C._import_ir_module_from_package(cu, importer.zip_reader, importer.storage_context, validate_map_location(importer.last_map_location), script_module_id)\n return wrap_cpp_module(cpp_module)", + "docstring": "Call by `` archive.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\_script.py", + "ast_data": "FunctionDef name:unpackage_script_module arg:importer arg:script_module_id arguments arg arg If Call Raise Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_run_node_and_set_meta", + "source_code": "def _run_node_and_set_meta(self, node) -> Any:\n out = super().run_node(node)\n self.env[node] = out\n node.meta.update(((k, v) for k, v in fx_traceback.get_current_meta().items() if k not in node.meta))\n node.meta['val'] = proxy_tensor.extract_val(out)\n return out", + "docstring": "Run node and set meta according to . This should be used on new nodes or nodes that have been modified. By default does not update . 
Set to the current meta, except for , which is recomputed.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py", + "ast_data": "FunctionDef name:_run_node_and_set_meta arg:self arg:node arguments arg arg Assign Call Call Assign Call Call Call Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "adaptive_max_pool1d_with_indices", + "source_code": "def adaptive_max_pool1d_with_indices(input: Tensor, output_size: BroadcastingList1[int], return_indices: bool=False) -> tuple[Tensor, Tensor]:\n if has_torch_function_unary(input):\n return handle_torch_function(adaptive_max_pool1d_with_indices, (input,), input, output_size, return_indices=return_indices)\n return torch.adaptive_max_pool1d(input, output_size)", + "docstring": "adaptive_max_pool1d(input, output_size, return_indices=False) Applies a 1D adaptive max pooling over an input signal composed of several input planes. See :class: for details and output shape. Args: output_size: the target output size (single integer) return_indices: whether to return pooling indices. Default: ``", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:adaptive_max_pool1d_with_indices arg:input arg:output_size arg:return_indices arguments arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "join", + "source_code": "@inlineCallbacks\ndef join(self) -> Generator[Deferred[Any], Any, None]:\n while self._active:\n yield DeferredList(self._active)", + "docstring": "join() Returns a deferred that is fired when all managed :attr: have completed their executions.", + "type": "method", + "file_path": "scrapy\\scrapy\\crawler.py", + "ast_data": "FunctionDef name:join arg:self arguments arg While Call" + }, + { + "library": "tensorflow", + "name": "_lookup_dependency", + "source_code": "def _lookup_dependency(self, name):\n unconditional = super(_DynamicLossScaleState, self)._lookup_dependency(name)\n if unconditional is not None:\n return unconditional\n if context.executing_eagerly():\n graph_key = None\n else:\n graph = ops.get_default_graph()\n graph_key = graph._graph_key\n return self._weights.get((name, graph_key), None)", + "docstring": "From Trackable. Find a weight in the current graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:_lookup_dependency arg:self arg:name arguments arg arg Assign Call Call If Compare Return return:yes If Call Assign Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "scatter_div", + "source_code": "@tf_export(v1=['scatter_div'])\ndef scatter_div(ref, indices, updates, use_locking=False, name=None):\n if ref.dtype._is_ref_dtype:\n return gen_state_ops.scatter_div(ref, indices, updates, use_locking=use_locking, name=name)\n return ref._lazy_read(gen_resource_variable_ops.resource_scatter_div(ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name))", + "docstring": "Divides a variable reference by sparse updates. This operation computes This operation outputs after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple reference the same location, their contributions divide. Requires or . Args: ref: A mutable . Must be one of the following types: , , , , , , , , , , , , , , , , . 
Should be from a node. indices: A . Must be one of the following types: , . A tensor of indices into the first dimension of . updates: A . Must have the same type as . A tensor of values that is divided by. use_locking: An optional . Defaults to . If True, the operation will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable . Has the same type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py", + "ast_data": "FunctionDef name:scatter_div arg:ref arg:indices arg:updates arg:use_locking arg:name arguments arg arg arg arg arg If Return return:yes Call Return return:yes Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "font_size", + "source_code": "@property\ndef font_size(self):\n return self.font.size", + "docstring": "The font size.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\dviread.py", + "ast_data": "FunctionDef name:font_size arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "size", + "source_code": "@abc.abstractmethod\ndef size(self) -> int:\n pass", + "docstring": "Returns the size of the queue at the time this method is called. Note that by the time `` method is called. That is, the following assertion should hold: size = q.size() res = q.get(size, timeout=0) assert size == len(res) -- or -- size = q.size() res = q.get(size * 2, timeout=1) assert size <= len(res) <= size * 2", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py", + "ast_data": "FunctionDef name:size arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "assert_scalar_v2", + "source_code": "@tf_export('debugging.assert_scalar', v1=[])\n@dispatch.add_dispatch_support\ndef assert_scalar_v2(tensor, message=None, name=None):\n assert_scalar(tensor=tensor, message=message, name=name)", + "docstring": "Asserts that the given is a scalar. This function raises unless it can be certain that the given is a scalar. is also raised if the shape of is unknown. This is always checked statically, so this method returns nothing. Args: tensor: A . message: A string to prefix to the default message. name: A name for this operation. Defaults to \"assert_scalar\" Raises: ValueError: If the tensor is not scalar (rank 0), or if its shape is unknown.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:assert_scalar_v2 arg:tensor arg:message arg:name arguments arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "value_dtype", + "source_code": "@property\ndef value_dtype(self):\n return self._value_dtype", + "docstring": "The table value dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:value_dtype arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "validate_jti", + "source_code": "def validate_jti(self, claims, jti):\n raise NotImplementedError()", + "docstring": "Validate if the given `` value is used before. 
Developers MUST implement this method:: def validate_jti(self, claims, jti): key = \"jti:{}-{}\".format(claims[\"sub\"], jti) if redis.get(key): return False redis.set(key, 1, ex=3600) return True", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7523\\client.py", + "ast_data": "FunctionDef name:validate_jti arg:self arg:claims arg:jti arguments arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "get_input_names", + "source_code": "def get_input_names(self):\n return self._input_names", + "docstring": "Returns keys to name inputs by. In case inputs provided were a list, tuple or single entry, we make up a key 'input_%d'. For dictionary case, we return a sorted list of keys.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:get_input_names arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_next", + "source_code": "@abc.abstractmethod\ndef get_next(self):\n raise NotImplementedError('Iterator.get_next()')", + "docstring": "Returns the next element. >>> dataset = tf.data.Dataset.from_tensors(42) >>> iterator = iter(dataset) >>> print(iterator.get_next()) tf.Tensor(42, shape=(), dtype=int32) Returns: A (nested) structure of values matching . Raises: : If the end of the iterator has been reached.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", + "ast_data": "FunctionDef name:get_next arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "conv2d_backprop_input", + "source_code": "@tf_export(v1=['nn.conv2d_backprop_input'])\n@dispatch.add_dispatch_support\ndef conv2d_backprop_input(input_sizes, filter=None, out_backprop=None, strides=None, padding=None, use_cudnn_on_gpu=True, data_format='NHWC', dilations=[1, 1, 1, 1], name=None, filters=None):\n filter = deprecation.deprecated_argument_lookup('filters', filters, 'filter', filter)\n padding, explicit_paddings = convert_padding(padding)\n return gen_nn_ops.conv2d_backprop_input(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name)", + "docstring": "Computes the gradients of convolution with respect to the input. Args: input_sizes: A of type . An integer vector representing the shape of , where is a 4-D tensor. filter: A . Must be one of the following types: , , , . 4-D with shape . out_backprop: A . Must have the same type as . 4-D with shape . Gradients w.r.t. the output of the convolution. strides: A list of . The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: Either the or indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is , this should be in the form . When explicit padding used and data_format is , this should be in the form . use_cudnn_on_gpu: An optional . Defaults to . data_format: An optional from: . Defaults to . Specify the data format of the input and output data. With the default format \"NHWC\", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be \"NCHW\", the data storage order of: [batch, in_channels, in_height, in_width]. dilations: An optional list of . Defaults to . 1-D tensor of length 4. 
The dilation factor for each dimension of . If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of , see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). filters: Alias for filter. Returns: A . Has the same type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:conv2d_backprop_input arg:input_sizes arg:filter arg:out_backprop arg:strides arg:padding arg:use_cudnn_on_gpu arg:data_format arg:dilations arg:name arg:filters arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "f1", + "source_code": "def f1(x):\n return x * (x - 1.0)", + "docstring": "f1 is a quadratic with roots at 0 and 1", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:f1 arg:x arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "tile", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef tile(x, n):\n if isinstance(n, int):\n n = [n]\n return array_ops.tile(x, n)", + "docstring": "Creates a tensor by tiling by . Args: x: A tensor or variable n: A list of integer. The length must be the same as the number of dimensions in . Returns: A tiled tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:tile arg:x arg:n arguments arg arg If Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_mark_prior_graph_output_as_aliased", + "source_code": "def _mark_prior_graph_output_as_aliased(self, index: PathOutputIndex) -> None:\n depth, output_index = index\n node = list(self._path_from_root)[depth]\n node.unaliased_in_all_paths[output_index] = False\n x = self.path_weakrefs[depth][output_index]\n assert x is not None\n x.remove_extra_reference()", + "docstring": "Remove a graph output from the unaliased, cached tensors in an ancestor node", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:_mark_prior_graph_output_as_aliased arg:self arg:index arguments arg arg Assign Assign Call Assign Assign Compare Call" + }, + { + "library": "pandas", + "name": "_get_period_range_edges", + "source_code": "def _get_period_range_edges(first: Period, last: Period, freq: BaseOffset, closed: Literal['right', 'left']='left', origin: TimeGrouperOrigin='start_day', offset: Timedelta | None=None) -> tuple[Period, Period]:\n if not all((isinstance(obj, Period) for obj in [first, last])):\n raise TypeError(\"'first' and 'last' must be instances of type Period\")\n first_ts = first.to_timestamp()\n last_ts = last.to_timestamp()\n adjust_first = not freq.is_on_offset(first_ts)\n adjust_last = freq.is_on_offset(last_ts)\n first_ts, last_ts = _get_timestamp_range_edges(first_ts, last_ts, freq, unit='ns', closed=closed, origin=origin, offset=offset)\n first = (first_ts + int(adjust_first) * freq).to_period(freq)\n last = (last_ts - int(adjust_last) * freq).to_period(freq)\n return (first, last)", + "docstring": "Adjust the provided and Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. 
last : pd.Period The ending Period of the range to be adjusted. freq : pd.DateOffset The freq to which the Periods will be adjusted. closed : {'right', 'left'}, default \"left\" Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': is 1970-01-01 - 'start': is the first value of the timeseries - 'start_day': is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects.", + "type": "function", + "file_path": "pandas\\pandas\\core\\resample.py", + "ast_data": "FunctionDef name:_get_period_range_edges arg:first arg:last arg:freq arg:closed arg:origin arg:offset arguments arg arg arg arg arg arg If Call Call Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_quantized_input_stats_required", + "source_code": "def _is_quantized_input_stats_required(conversion_flags: _conversion_flags_pb2.ConverterFlags) -> bool:\n quantized_inference_types = [_types_pb2.QUANTIZED_UINT8, _types_pb2.QUANTIZED_INT8]\n return (conversion_flags.inference_type in quantized_inference_types or conversion_flags.inference_input_type in quantized_inference_types) and (not conversion_flags.post_training_quantize)", + "docstring": "Checks if the flag is required for conversion. Args: conversion_flags: A protocol buffer describing the conversion process. Returns: True, if the or the is a quantized type and it is not post training quantization, else False.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py", + "ast_data": "FunctionDef name:_is_quantized_input_stats_required arg:conversion_flags arguments arg Assign Return return:yes BoolOp BoolOp Compare Compare" + }, + { + "library": "django", + "name": "_check_list_display_links", + "source_code": "def _check_list_display_links(self, obj):\n from django.contrib.admin.options import ModelAdmin\n if obj.list_display_links is None:\n return []\n elif not isinstance(obj.list_display_links, (list, tuple)):\n return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110')\n elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:\n return list(chain.from_iterable((self._check_list_display_links_item(obj, field_name, 'list_display_links[%d]' % index) for index, field_name in enumerate(obj.list_display_links))))\n return []", + "docstring": "Check that list_display_links is a unique subset of list_display.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_list_display_links arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call If Compare Return return:yes Call Call Call Call Return return:no" + }, + { + "library": "pytorch", + "name": "sdpa_flop_count", + "source_code": "def sdpa_flop_count(query_shape, key_shape, value_shape):\n b, h, s_q, d_q = query_shape\n _b2, _h2, s_k, _d2 = key_shape\n _b3, _h3, _s3, d_v = value_shape\n assert b == _b2 == _b3 and h == _h2 == _h3 and (d_q == _d2) and (s_k == _s3) and (d_q == _d2)\n total_flops = 0\n total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, 
s_k))\n total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_v))\n return total_flops", + "docstring": "Count flops for self-attention. NB: We can assume that value_shape == key_shape", + "type": "function", + "file_path": "pytorch\\torch\\utils\\flop_counter.py", + "ast_data": "FunctionDef name:sdpa_flop_count arg:query_shape arg:key_shape arg:value_shape arguments arg arg arg Assign Assign Assign BoolOp Compare Compare Compare Compare Compare Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "reduce_scatter_tensor_autograd", + "source_code": "def reduce_scatter_tensor_autograd(self: torch.Tensor, reduceOp: str, scatter_dim: int, group: RANK_TYPES, tag: str=''):\n group_name = _resolve_group_name(group, tag)\n group_size = c10d._get_group_size_by_name(group_name)\n assert self.size(scatter_dim) % group_size == 0, f'input dimension 0 ({self.size(0)} must be a multiple of group_size {group_size}'\n if scatter_dim != 0:\n tensor_list = torch.chunk(self, group_size, dim=scatter_dim)\n self = torch.cat(tensor_list)\n tensor = torch.ops._c10d_functional_autograd.reduce_scatter_tensor(self, reduceOp.lower(), group_size, group_name)\n res = _FromTorchTensor.apply(tensor)\n return res", + "docstring": "Reduces the tensor data across all machines in such a way that all get the final result, then scatter the results to corresponding ranks. This function is the same as reduce_scatter_tensor but will propagate the backwards gradient across workers. Currently only the \"sum\" reduceOp is supported. See reduce_scatter_tensor for more details on usage.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py", + "ast_data": "FunctionDef name:reduce_scatter_tensor_autograd arg:self arg:reduceOp arg:scatter_dim arg:group arg:tag arguments arg arg arg arg arg Assign Call Assign Call Compare Call Call If Compare Assign Call Assign Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_do_batch_all_reduce_sparse", + "source_code": "def _do_batch_all_reduce_sparse(self, reduce_op, sparse_values):\n logging.log_first_n(logging.WARN, 'Efficient allreduce is not supported for %d IndexedSlices' % len(sparse_values), 10)\n return self._simple_cross_replica_ops.batch_reduce(reduce_op, zip(sparse_values, sparse_values))", + "docstring": "Run batch all-reduce for sparse values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", + "ast_data": "FunctionDef name:_do_batch_all_reduce_sparse arg:self arg:reduce_op arg:sparse_values arguments arg arg arg Call Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "zmap", + "source_code": "@xp_capabilities()\ndef zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):\n like_zscore = scores is compare\n xp = array_namespace(scores, compare)\n scores, compare = xp_promote(scores, compare, force_floating=True, xp=xp)\n with warnings.catch_warnings():\n if like_zscore:\n warnings.simplefilter('ignore', SmallSampleWarning)\n mn = _xp_mean(compare, axis=axis, keepdims=True, nan_policy=nan_policy)\n std = _xp_var(compare, axis=axis, correction=ddof, keepdims=True, nan_policy=nan_policy) ** 0.5\n with np.errstate(invalid='ignore', divide='ignore'):\n z = _demean(scores, mn, axis, xp=xp, precision_warning=False) / std\n if like_zscore:\n eps = xp.finfo(z.dtype).eps\n zero = std <= xp.abs(eps * mn)\n zero = xp.broadcast_to(zero, z.shape)\n z = xpx.at(z, zero).set(xp.nan)\n return z", + 
"docstring": "Calculate the relative z-scores. Return an array of z-scores, i.e., scores that are standardized to zero mean and unit variance, where mean and variance are calculated from the comparison array. Parameters ---------- scores : array_like The input for which z-scores are calculated. compare : array_like The input from which the mean and standard deviation of the normalization are taken; assumed to have the same dimension as . axis : int or None, optional Axis over which mean and variance of are calculated. Default is 0. If None, compute over the whole array . ddof : int, optional Degrees of freedom correction in the calculation of the standard deviation. Default is 0. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle the occurrence of nans in . 'propagate' returns nan, 'raise' raises an exception, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Note that when the value is 'omit', nans in also propagate to the output, but they do not affect the z-scores computed for the non-nan values. Returns ------- zscore : array_like Z-scores, in the same shape as . Notes ----- This function preserves ndarray subclasses, and works also with matrices and masked arrays (it uses instead of for parameters). Examples -------- >>> from scipy.stats import zmap >>> a = [0.5, 2.0, 2.5, 3] >>> b = [0, 1, 2, 3, 4] >>> zmap(a, b) array([-1.06066017, 0. , 0.35355339, 0.70710678])", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:zmap arg:scores arg:compare arg:axis arg:ddof arg:nan_policy arguments arg arg arg arg arg Assign Compare Assign Call Assign Call With Call If Call Assign Call Assign Call With Call Assign Call If Assign Call Assign Compare Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "generate_square_subsequent_mask", + "source_code": "@staticmethod\ndef generate_square_subsequent_mask(sz: int, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n return _generate_square_subsequent_mask(sz, dtype=dtype, device=device)", + "docstring": "Generate a square causal mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0).", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\transformer.py", + "ast_data": "FunctionDef name:generate_square_subsequent_mask arg:sz arg:device arg:dtype arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "build_graph", + "source_code": "def build_graph(device, input_shape, variable, num_inputs, axis, grad):\n with ops.device('/%s:0' % device):\n if not variable:\n inputs = [array_ops.zeros(input_shape) for _ in range(num_inputs)]\n elif axis == 1:\n inputs = [array_ops.zeros([input_shape[0], random.randint(max(1, input_shape[1] - 5), input_shape[1] + 5)]) for _ in range(num_inputs)]\n else:\n inputs = [array_ops.zeros([random.randint(max(1, input_shape[0] - 5), input_shape[0] + 5), input_shape[1]]) for _ in range(num_inputs)]\n outputs = [array_ops.concat(inputs, axis) for _ in range(100)]\n if grad:\n return control_flow_ops.group(*list(itertools.chain.from_iterable([gradients_impl.gradients(output, inputs) for output in outputs])))\n else:\n return control_flow_ops.group(*outputs)", + "docstring": "Build a graph containing a sequence of concat operations. Args: device: string, the device to run on. input_shape: shape of the input tensors. 
variable: whether or not to randomize the input shape num_inputs: the number of inputs to concat axis: axis to be concat'ed grad: if True compute the gradient Returns: An array of tensors to run()", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\concat_benchmark.py", + "ast_data": "FunctionDef name:build_graph arg:device arg:input_shape arg:variable arg:num_inputs arg:axis arg:grad arguments arg arg arg arg arg arg With Call If Assign Call Call If Compare Assign Call Call Call Call Assign Call Call Call Call Assign Call Call If Return return:yes Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "online_softmax_combine", + "source_code": "@triton.jit\ndef online_softmax_combine(lhs_max, lhs_sum, rhs_max, use_fast_math: tl.constexpr):\n out_max = maximum(lhs_max, rhs_max)\n lhs_scale = tl.where(out_max == float('-inf'), 1.0, exp(lhs_max - out_max, use_fast_math))\n rhs_scale = tl.where(out_max == float('-inf'), 1.0, exp(rhs_max - out_max, use_fast_math))\n out_sum = lhs_sum * lhs_scale + rhs_scale\n return (out_max, out_sum)", + "docstring": "When we do combine, we assume lhs is the accumulator and rhs is the next block of data. Then rhs_sum is always 1. With that assumption, we can save some registers and computation.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_helpers.py", + "ast_data": "FunctionDef name:online_softmax_combine arg:lhs_max arg:lhs_sum arg:rhs_max arg:use_fast_math arguments arg arg arg arg Assign Call Assign Call Compare Call Call Assign Call Compare Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "InstanceNorm1d", + "source_code": "class InstanceNorm1d(_InstanceNorm):\n\n def _get_no_batch_dim(self):\n return 2\n\n def _check_input_dim(self, input):\n if input.dim() not in (2, 3):\n raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')", + "docstring": "Applies Instance Normalization. This operation applies Instance Normalization over a 2D (unbatched) or 3D (batched) input as described in the paper __. .. math:: y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta The mean and standard-deviation are calculated per-dimension separately for each object in a mini-batch. 
:math: and :math: are learnable parameter vectors of size (where is the number of features or channels of the input) if :attr: is `torch.var(input, unbiased=False)track_running_statsmomentummomentum\\hat{x}_\\text{new} = (1 - \\text{momentum}) \\times \\hat{x} + \\text{momentum} \\times x_t\\hat{x}x_tInstanceNorm1dLayerNormInstanceNorm1dLayerNormLayerNormInstanceNorm1dC(N, C, L)(C, L)(N, C, L)(C, L)` (same shape as input) Examples:: >>> # Without Learnable Parameters >>> m = nn.InstanceNorm1d(100) >>> # With Learnable Parameters >>> m = nn.InstanceNorm1d(100, affine=True) >>> input = torch.randn(20, 100, 40) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\instancenorm.py", + "ast_data": "ClassDef name:InstanceNorm1d FunctionDef name:_get_no_batch_dim arg:self arguments arg Return return:yes FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "CollectiveAllReduceStrategy", + "source_code": "@tf_export('distribute.MultiWorkerMirroredStrategy', v1=[])\nclass CollectiveAllReduceStrategy(distribute_lib.Strategy):\n _collective_key_base = 0\n\n def __init__(self, cluster_resolver=None, communication_options=None):\n if communication_options is None:\n communication_options = collective_util.Options()\n super(CollectiveAllReduceStrategy, self).__init__(CollectiveAllReduceExtended(self, cluster_resolver=cluster_resolver, communication_options=communication_options))\n distribute_lib.distribution_strategy_gauge.get_cell('V2').set('MultiWorkerMirroredStrategy')\n distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended._num_devices_per_worker)\n\n @classmethod\n def _from_local_devices(cls, devices, communication_options=None):\n obj = cls(communication_options=communication_options)\n obj.extended._initialize_local(tfconfig_cluster_resolver.TFConfigClusterResolver(), devices=devices)\n return obj\n\n @property\n def cluster_resolver(self):\n return self.extended._cluster_resolver", + "docstring": "A distribution strategy for synchronous training on multiple workers. This strategy implements synchronous distributed training across multiple workers, each with potentially multiple GPUs. Similar to , it replicates all variables and computations to each local device. The difference is that it uses a distributed collective implementation (e.g. all-reduce), so that multiple workers can work together. You need to launch your program on each worker and configure correctly. For example, if you are using , each worker needs to have its corresponding and set in the environment variable. An example TF_CONFIG on worker-0 of a two worker cluster is: Your program runs on each worker as-is. Note that collectives require each worker to participate. All and non API may use collectives internally, e.g. checkpointing and saving since reading a with all-reduces the value. Therefore it's recommended to run exactly the same program on each worker. Dispatching based on or of the worker is error-prone. determines the number of GPUs the strategy uses. If it's zero, the strategy uses the CPU. All workers need to use the same number of devices, otherwise the behavior is undefined. This strategy is not intended for TPU. Use instead. After setting up TF_CONFIG, using this strategy is similar to using and . 
You can also write your own training loop: See [Multi-worker training with Keras]( for a detailed tutorial. __Saving__ You need to save and checkpoint on all workers instead of just one. This is because variables whose synchronization=ON_READ triggers aggregation during saving. It's recommended to save to a different path on each worker to avoid race conditions. Each worker saves the same thing. See [Multi-worker training with Keras]( tutorial for examples. __Known Issues__ * does not return the correct number of accelerators. The strategy uses all available GPUs if is or . * In eager mode, the strategy needs to be created before calling any other Tensorflow API.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py", + "ast_data": "ClassDef name:CollectiveAllReduceStrategy Assign FunctionDef name:__init__ arg:self arg:cluster_resolver arg:communication_options arguments arg arg arg If Compare Assign Call Call Call Call Call Call Call Call Call Call FunctionDef name:_from_local_devices arg:cls arg:devices arg:communication_options arguments arg arg arg Assign Call Call Call Return return:yes FunctionDef name:cluster_resolver arg:self arguments arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "introspect_token", + "source_code": "def introspect_token(self, token):\n raise NotImplementedError()", + "docstring": "Read given token and return its introspection metadata as a dictionary following _:: def introspect_token(self, token): return { \"active\": True, \"client_id\": token.client_id, \"token_type\": token.token_type, \"username\": get_token_username(token), \"scope\": token.get_scope(), \"sub\": get_token_user_sub(token), \"aud\": token.client_id, \"iss\": \" \"exp\": token.expires_at, \"iat\": token.issued_at, } .. _:", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7662\\introspection.py", + "ast_data": "FunctionDef name:introspect_token arg:self arg:token arguments arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "_estimate_gaussian_covariances_diag", + "source_code": "def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):\n avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]\n avg_means2 = means ** 2\n return avg_X2 - avg_means2 + reg_covar", + "docstring": "Estimate the diagonal covariance vectors. 
Parameters ---------- responsibilities : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- covariances : array, shape (n_components, n_features) The covariance vector of the current components.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py", + "ast_data": "FunctionDef name:_estimate_gaussian_covariances_diag arg:resp arg:X arg:nk arg:means arg:reg_covar arguments arg arg arg arg arg Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_logical_device_configuration", + "source_code": "def get_logical_device_configuration(self, dev):\n self._initialize_physical_devices()\n if dev not in self._physical_devices:\n raise ValueError('Unrecognized device: %s' % repr(dev))\n return self._virtual_device_map.get(dev)", + "docstring": "Get the virtual device configuration for a PhysicalDevice.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:get_logical_device_configuration arg:self arg:dev arguments arg arg Call If Compare Raise Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "perplexity", + "source_code": "def perplexity(self, X, sub_sampling=False):\n check_is_fitted(self)\n X = self._check_non_neg_array(X, reset_n_features=True, whom='LatentDirichletAllocation.perplexity')\n return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)", + "docstring": "Calculate approximate perplexity for data X. Perplexity is defined as exp(-1. * log-likelihood per word) .. versionchanged:: 0.19 *doc_topic_distr* argument has been deprecated and is ignored because user no longer has access to unnormalized distribution Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. sub_sampling : bool Do sub-sampling or not. Returns ------- score : float Perplexity score.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py", + "ast_data": "FunctionDef name:perplexity arg:self arg:X arg:sub_sampling arguments arg arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "check_per_example_loss_rank", + "source_code": "@tf_contextlib.contextmanager\ndef check_per_example_loss_rank(per_example_loss):\n loss_rank = per_example_loss.shape.rank\n if loss_rank is not None:\n if loss_rank == 0:\n raise ValueError(f'Invalid value passed for `per_example_loss`. Expected a tensor with at least rank 1. Received per_example_loss={per_example_loss} with rank {loss_rank}')\n yield\n else:\n with ops.control_dependencies([check_ops.assert_greater_equal(array_ops.rank(per_example_loss), math_ops.cast(1, dtype=dtypes.int32), message='Invalid value passed for `per_example_loss`. Expected a tensor with at least rank 1.')]):\n yield", + "docstring": "Context manager that checks that the rank of per_example_loss is at least 1. Args: per_example_loss: Per example loss tensor. 
Yields: A context manager.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\util.py", + "ast_data": "FunctionDef name:check_per_example_loss_rank arg:per_example_loss arguments arg Assign If Compare If Compare Raise Call With Call Call Call Call" + }, + { + "library": "numpy", + "name": "min_scalar_type", + "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)\ndef min_scalar_type(a):\n return (a,)", + "docstring": "min_scalar_type(a, /) For scalar ``, returns the vector's dtype unmodified. Floating point values are not demoted to integers, and complex values are not demoted to floats. Parameters ---------- a : scalar or array_like The value whose minimal data type is to be found. Returns ------- out : dtype The minimal data type. See Also -------- result_type, promote_types, dtype, can_cast Examples -------- >>> import numpy as np >>> np.min_scalar_type(10) dtype('uint8') >>> np.min_scalar_type(-260) dtype('int16') >>> np.min_scalar_type(3.1) dtype('float16') >>> np.min_scalar_type(1e50) dtype('float64') >>> np.min_scalar_type(np.arange(4,dtype='f8')) dtype('float64')", + "type": "function", + "file_path": "numpy\\numpy\\_core\\multiarray.py", + "ast_data": "FunctionDef name:min_scalar_type arg:a arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "Event", + "source_code": "class Event:\n\n def __init__(self, enable_timing: bool=False) -> None:\n self.__eventId = torch._C._mps_acquireEvent(enable_timing)\n\n def __del__(self) -> None:\n if hasattr(torch._C, '_mps_releaseEvent') and self.__eventId > 0:\n torch._C._mps_releaseEvent(self.__eventId)\n\n def record(self) -> None:\n torch._C._mps_recordEvent(self.__eventId)\n\n def wait(self) -> None:\n torch._C._mps_waitForEvent(self.__eventId)\n\n def query(self) -> bool:\n return torch._C._mps_queryEvent(self.__eventId)\n\n def synchronize(self) -> None:\n torch._C._mps_synchronizeEvent(self.__eventId)\n\n def elapsed_time(self, end_event: 'Event') -> float:\n return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId)", + "docstring": "Wrapper around an MPS event. MPS events are synchronization markers that can be used to monitor the device's progress, to accurately measure timing, and to synchronize MPS streams. 
Args: enable_timing (bool, optional): indicates if the event should measure time (default: ``)", + "type": "class", + "file_path": "pytorch\\torch\\mps\\event.py", + "ast_data": "ClassDef name:Event FunctionDef name:__init__ arg:self arg:enable_timing arguments arg arg Assign Call FunctionDef name:__del__ arg:self arguments arg If BoolOp Call Compare Call FunctionDef name:record arg:self arguments arg Call FunctionDef name:wait arg:self arguments arg Call FunctionDef name:query arg:self arguments arg Return return:yes Call FunctionDef name:synchronize arg:self arguments arg Call FunctionDef name:elapsed_time arg:self arg:end_event arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "run_fn", + "source_code": "def run_fn(inputs):\n fn_result = fn(ctx, inputs)\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n if flat_last_step_outputs:\n with ops.control_dependencies([fn_result]):\n return [array_ops.identity(f) for f in flat_last_step_outputs]\n else:\n return fn_result", + "docstring": "Single step on the TPU device.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:run_fn arg:inputs arguments arg Assign Call Assign Call If With Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_async", + "source_code": "def is_async():\n return context().is_async()", + "docstring": "Returns true if current thread is in async mode.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:is_async arguments Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "slice_inputs", + "source_code": "def slice_inputs(self, indices_dataset, inputs):\n flat_inputs = nest.flatten(inputs)\n\n def dynamic_shape_like(t):\n shape = list(t.shape)\n shape[0] = None\n return tuple(shape)\n flat_dtypes = [inp.dtype for inp in flat_inputs]\n contiguous = True\n if self._shuffle and self._shuffle != 'batch':\n contiguous = False\n\n def grab_batch(indices):\n\n def py_method(ind):\n\n def slice_array(data):\n return training_utils.slice_arrays(data, ind.numpy(), contiguous=contiguous)\n return [slice_array(inp) for inp in flat_inputs]\n flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)\n for v, original_inp in zip(flat_out, flat_inputs):\n v.set_shape(dynamic_shape_like(original_inp))\n return nest.pack_sequence_as(inputs, flat_out)\n dataset = indices_dataset.map(grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)\n return dataset", + "docstring": "Slice inputs into a Dataset of batches. Given a Dataset of batch indices and the unsliced inputs, this step slices the inputs in a parallelized fashion and produces a dataset of input batches. Args: indices_dataset: A Dataset of batched indices inputs: A python data structure that contains the inputs, targets, and possibly sample weights. 
Returns: A Dataset of input batches matching the batch indices.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:slice_inputs arg:self arg:indices_dataset arg:inputs arguments arg arg arg Assign Call FunctionDef name:dynamic_shape_like arg:t arguments arg Assign Call Assign Return return:yes Call Assign Assign If BoolOp Compare Assign FunctionDef name:grab_batch arg:indices arguments arg FunctionDef name:py_method arg:ind arguments arg FunctionDef name:slice_array arg:data arguments arg Return return:yes Call Call Return return:yes Call Assign Call For Call Call Call Return return:yes Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_asdict", + "source_code": "def _asdict(self):\n out = _dict(_zip(self._fields, self))\n out.update(self.__dict__)\n return out", + "docstring": "Return a new dict which maps field names to their values.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_bunch.py", + "ast_data": "FunctionDef name:_asdict arg:self arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "index_expanded_dims_and_copy_", + "source_code": "def index_expanded_dims_and_copy_(dst: torch.Tensor, src: torch.Tensor, expanded_dims: list[int]) -> None:\n dst = index_expanded_dims(dst, expanded_dims)\n src = index_expanded_dims(src, expanded_dims)\n dst.copy_(src)", + "docstring": "Index into expanded dimensions of both dst and src then copy_", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\compile_fx.py", + "ast_data": "FunctionDef name:index_expanded_dims_and_copy_ arg:dst arg:src arg:expanded_dims arguments arg arg arg Assign Call Assign Call Call" + }, + { + "library": "scipy", + "name": "var", + "source_code": "def var(self, alpha, n):\n a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n)\n n, Sa = (n[..., np.newaxis], Sa[..., np.newaxis])\n return n * a / Sa * (1 - a / Sa) * (n + Sa) / (1 + Sa)", + "docstring": "The variance of the Dirichlet multinomial distribution. Parameters ---------- %(_dirichlet_mn_doc_default_callparams)s Returns ------- out: array_like The variances of the components of the distribution. This is the diagonal of the covariance matrix of the distribution.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:var arg:self arg:alpha arg:n arguments arg arg arg Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "trigger_tool", + "source_code": "def trigger_tool(self, name):\n self.toolmanager.trigger_tool(name, sender=self)", + "docstring": "Trigger the tool. 
Parameters ---------- name : str Name (id) of the tool triggered from within the container.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:trigger_tool arg:self arg:name arguments arg arg Call" + }, + { + "library": "scikit-learn", + "name": "_get_doc_link", + "source_code": "def _get_doc_link(self):\n if self.__class__.__module__.split('.')[0] != self._doc_link_module:\n return ''\n if self._doc_link_url_param_generator is None:\n estimator_name = self.__class__.__name__\n estimator_module = '.'.join(itertools.takewhile(lambda part: not part.startswith('_'), self.__class__.__module__.split('.')))\n return self._doc_link_template.format(estimator_module=estimator_module, estimator_name=estimator_name)\n return self._doc_link_template.format(**self._doc_link_url_param_generator())", + "docstring": "Generates a link to the API documentation for a given estimator. This method generates the link to the estimator's documentation page by using the template defined by the attribute . Returns ------- url : str The URL to the API documentation for this estimator. If the estimator does not belong to module , the empty string (i.e. ) is returned.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\base.py", + "ast_data": "FunctionDef name:_get_doc_link arg:self arguments arg If Compare Call Return return:yes If Compare Assign Assign Call Call arguments arg Call Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "CameraModel", + "source_code": "class CameraModel:\n\n def __init__(self, image_size: ImageSize, model_type: CameraModelType, params: Tensor) -> None:\n self._model = get_model_from_type(model_type, image_size, params)\n\n def __getattr__(self, name: str) -> Any:\n return getattr(self._model, name)\n\n def __repr__(self) -> str:\n return f'CameraModel({self.image_size}, {self._model.__class__.__name__}, {self.params})'", + "docstring": "Class to represent camera models. Example: >>> # Pinhole Camera Model >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.PINHOLE, torch.Tensor([328., 328., 320., 240.])) >>> # Brown Conrady Camera Model >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.BROWN_CONRADY, torch.Tensor([1.0, 1.0, 1.0, 1.0, ... 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])) >>> # Kannala Brandt K3 Camera Model >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.KANNALA_BRANDT_K3, torch.Tensor([1.0, 1.0, 1.0, ... 
1.0, 1.0, 1.0, 1.0, 1.0])) >>> # Orthographic Camera Model >>> cam = CameraModel(ImageSize(480, 640), CameraModelType.ORTHOGRAPHIC, torch.Tensor([328., 328., 320., 240.])) >>> cam.params tensor([328., 328., 320., 240.])", + "type": "class", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "ClassDef name:CameraModel FunctionDef name:__init__ arg:self arg:image_size arg:model_type arg:params arguments arg arg arg arg Assign Call FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "legvander", + "source_code": "def legvander(x, deg):\n ideg = pu._as_int(deg, 'deg')\n if ideg < 0:\n raise ValueError('deg must be non-negative')\n x = np.array(x, copy=None, ndmin=1) + 0.0\n dims = (ideg + 1,) + x.shape\n dtyp = x.dtype\n v = np.empty(dims, dtype=dtyp)\n v[0] = x * 0 + 1\n if ideg > 0:\n v[1] = x\n for i in range(2, ideg + 1):\n v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i\n return np.moveaxis(v, 0, -1)", + "docstring": "Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree and sample points . The pseudo-Vandermonde matrix is defined by .. math:: V[..., i] = L_i(x) where `VxcVxx`.", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:legvander arg:x arg:deg arguments arg arg Assign Call If Compare Raise Call Assign Call Assign Assign Assign Call Assign If Compare Assign For Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "SACTradeOffStats", + "source_code": "@dataclass\nclass SACTradeOffStats:\n n_segments: int\n slopes: list[float]\n intercepts: list[float]\n fit_breaks: list[float]\n tradeoff_curve: OrderedDict[float, float]\n sac_memory: int\n sac_runtime: float", + "docstring": "Stores statistics for activation-checkpointing trade-off. Attributes: n_segments (int): Number of piecewise linear segments fitted to the trade-off curve. slopes (List[float]): Slopes of the pieces of linear segments fitted to the trade-off curve. intercepts (List[float]): Intercepts of the of the pieces of linear segments fitted to the trade-off curve. fit_breaks (List[float]): Breakpoints of the of the pieces of linear segments fitted to the trade-off curve. tradeoff_curve (OrderedDict[float, float]): Trade-off curve data of memory discarded vs recomputation time. sac_memory (int): Total memory of operations available for activation checkpointing in bytes. sac_runtime (float): Total runtime of operations available for activation checkpointing in milliseconds.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py", + "ast_data": "ClassDef name:SACTradeOffStats" + }, + { + "library": "tensorflow", + "name": "scatter_sub", + "source_code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n return gen_state_ops.scatter_sub(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", + "docstring": "Subtracts from this variable. Args: sparse_delta: to be subtracted from this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered subtraction has completed. 
Raises: TypeError: if is not an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:scatter_sub arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "Invert", + "source_code": "class Invert(Module):\n\n def __init__(self, max_val: Optional[Tensor]=None) -> None:\n super().__init__()\n if max_val is None:\n max_val = torch.tensor(1.0)\n if not isinstance(max_val, Parameter):\n self.register_buffer('max_val', max_val)\n else:\n self.max_val = max_val\n\n def forward(self, input: Tensor) -> Tensor:\n return invert(input, self.max_val)", + "docstring": "Invert the values of an input tensor by its maximum value. Args: input: The input tensor to invert with an arbitatry shape. max_val: The expected maximum value in the input tensor. The shape has to according to the input tensor shape, or at least has to work with broadcasting. Default: 1.0. Example: >>> img = torch.rand(1, 2, 4, 4) >>> Invert()(img).shape torch.Size([1, 2, 4, 4]) >>> img = 255. * torch.rand(1, 2, 3, 4, 4) >>> Invert(torch.as_tensor(255.))(img).shape torch.Size([1, 2, 3, 4, 4]) >>> img = torch.rand(1, 3, 4, 4) >>> Invert(torch.as_tensor([[[[1.]]]]))(img).shape torch.Size([1, 3, 4, 4])", + "type": "class", + "file_path": "kornia\\kornia\\enhance\\adjust.py", + "ast_data": "ClassDef name:Invert FunctionDef name:__init__ arg:self arg:max_val arguments arg arg Call Call If Compare Assign Call If Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "ExtractIsoWeekDay", + "source_code": "class ExtractIsoWeekDay(Extract):\n lookup_name = 'iso_week_day'", + "docstring": "Return Monday=1 through Sunday=7, based on ISO-8601.", + "type": "class", + "file_path": "django\\django\\db\\models\\functions\\datetime.py", + "ast_data": "ClassDef name:ExtractIsoWeekDay Assign" + }, + { + "library": "scipy", + "name": "_angular_rate_to_rotvec_dot_matrix", + "source_code": "def _angular_rate_to_rotvec_dot_matrix(rotvecs):\n norm = np.linalg.norm(rotvecs, axis=1)\n k = np.empty_like(norm)\n mask = norm > 0.0001\n nm = norm[mask]\n k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm ** 2\n mask = ~mask\n nm = norm[mask]\n k[mask] = 1 / 12 + 1 / 720 * nm ** 2\n skew = _create_skew_matrix(rotvecs)\n result = np.empty((len(rotvecs), 3, 3))\n result[:] = np.identity(3)\n result[:] += 0.5 * skew\n result[:] += k[:, None, None] * np.matmul(skew, skew)\n return result", + "docstring": "Compute matrices to transform angular rates to rot. vector derivatives. The matrices depend on the current attitude represented as a rotation vector. Parameters ---------- rotvecs : ndarray, shape (n, 3) Set of rotation vectors. 
Returns ------- ndarray, shape (n, 3, 3)", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py", + "ast_data": "FunctionDef name:_angular_rate_to_rotvec_dot_matrix arg:rotvecs arguments arg Assign Call Assign Call Assign Compare Assign Assign Call Assign Assign Assign Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "pygame", + "name": "metrics", + "source_code": "def metrics(self, text):\n return self.get_metrics(text)", + "docstring": "metrics(text) -> list Gets the metrics for each character in the passed string.", + "type": "method", + "file_path": "pygame\\src_py\\ftfont.py", + "ast_data": "FunctionDef name:metrics arg:self arg:text arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_scaled_loss", + "source_code": "def get_scaled_loss(self, loss):\n if callable(loss):\n\n def new_loss():\n loss_val = loss()\n return loss_val * math_ops.cast(self.loss_scale, loss_val.dtype)\n return new_loss\n else:\n return loss * math_ops.cast(self.loss_scale, loss.dtype)", + "docstring": "Scales the loss by the loss scale. This method is only needed if you compute gradients manually, e.g. with . In that case, call this method to scale the loss before passing the loss to . If you use or , loss scaling is automatically applied and this method is unneeded. If this method is called, should also be called. See the doc for an example. Args: loss: The loss, which will be multiplied by the loss scale. Can either be a tensor or a callable returning a tensor. Returns: multiplied by .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:get_scaled_loss arg:self arg:loss arguments arg arg If Call FunctionDef name:new_loss arguments Assign Call Return return:yes Call Return return:yes Return return:yes Call" + }, + { + "library": "pandas", + "name": "fill_binop", + "source_code": "def fill_binop(left, right, fill_value):\n if fill_value is not None:\n left_mask = isna(left)\n right_mask = isna(right)\n mask = left_mask ^ right_mask\n if left_mask.any():\n left = left.copy()\n left[left_mask & mask] = fill_value\n if right_mask.any():\n right = right.copy()\n right[right_mask & mask] = fill_value\n return (left, right)", + "docstring": "If a non-None fill_value is given, replace null entries in left and right with this value, but only in positions where _one_ of left/right is null, not both. 
Parameters ---------- left : array-like right : array-like fill_value : object Returns ------- left : array-like right : array-like Notes ----- Makes copies if fill_value is not None and NAs are present.", + "type": "function", + "file_path": "pandas\\pandas\\core\\ops\\array_ops.py", + "ast_data": "FunctionDef name:fill_binop arg:left arg:right arg:fill_value arguments arg arg arg If Compare Assign Call Assign Call Assign If Call Assign Call Assign If Call Assign Call Assign Return return:yes" + }, + { + "library": "numpy", + "name": "combine_paths", + "source_code": "def combine_paths(self, *args):\n return combine_paths(*args)", + "docstring": "Return a list of existing paths composed by all combinations of items from the arguments.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "FunctionDef name:combine_paths arg:self arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_handle_complex", + "source_code": "def _handle_complex(tensor):\n return torch.view_as_real(tensor) if not isinstance(tensor, torch.nn.UninitializedParameter) and tensor.is_complex() else tensor", + "docstring": "Returns a real view of a tensor if complex dtype else just the tensor need to check if a UninitializedParameter because otherwise checking is_complex is an error for a LazyModule", + "type": "function", + "file_path": "pytorch\\torch\\_utils.py", + "ast_data": "FunctionDef name:_handle_complex arg:tensor arguments arg Return return:yes BoolOp Call Call Call" + }, + { + "library": "cherrypy", + "name": "state", + "source_code": "@property\ndef state(self):\n return self._state", + "docstring": "The bus state.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\win32.py", + "ast_data": "FunctionDef name:state arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "resize_fn", + "source_code": "def resize_fn(images_t, new_size):\n if method == ResizeMethodV1.BILINEAR or method == ResizeMethod.BILINEAR:\n return gen_image_ops.resize_bilinear(images_t, new_size, align_corners=align_corners)\n elif method == ResizeMethodV1.NEAREST_NEIGHBOR or method == ResizeMethod.NEAREST_NEIGHBOR:\n return gen_image_ops.resize_nearest_neighbor(images_t, new_size, align_corners=align_corners)\n elif method == ResizeMethodV1.BICUBIC or method == ResizeMethod.BICUBIC:\n return gen_image_ops.resize_bicubic(images_t, new_size, align_corners=align_corners)\n elif method == ResizeMethodV1.AREA or method == ResizeMethod.AREA:\n return gen_image_ops.resize_area(images_t, new_size, align_corners=align_corners)\n else:\n raise ValueError('Resize method is not implemented: {}'.format(method))", + "docstring": "Legacy resize core function, passed to _resize_images_common.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", + "ast_data": "FunctionDef name:resize_fn arg:images_t arg:new_size arguments arg arg If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "_get_control_flow_context", + "source_code": "def _get_control_flow_context(self):\n return self._control_flow_context", + "docstring": "Returns the control flow context of this op. 
Returns: A context object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_get_control_flow_context arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_inverse_poly_zero", + "source_code": "def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):\n return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)", + "docstring": "Inverse cubic interpolation f-values -> x-values Given four points (fa, a), (fb, b), (fc, c), (fd, d) with fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points and compute x=IP(0).", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_zeros_py.py", + "ast_data": "FunctionDef name:_inverse_poly_zero arg:a arg:b arg:c arg:d arg:fa arg:fb arg:fc arg:fd arguments arg arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "qspline1d", + "source_code": "def qspline1d(signal, lamb=0.0):\n xp = array_namespace(signal)\n if lamb != 0.0:\n raise ValueError('Smoothing quadratic splines not supported yet.')\n else:\n return xp.asarray(_quadratic_coeff(signal))", + "docstring": "Compute quadratic spline coefficients for rank-1 array. Parameters ---------- signal : ndarray A rank-1 array representing samples of a signal. lamb : float, optional Smoothing coefficient (must be zero for now). Returns ------- c : ndarray Quadratic spline coefficients. See Also -------- qspline1d_eval : Evaluate a quadratic spline at the new set of points. Notes ----- Find the quadratic spline coefficients for a 1-D signal assuming mirror-symmetric boundary conditions. To obtain the signal back from the spline representation mirror-symmetric-convolve these coefficients with a length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 . 
Examples -------- We can filter a signal to reduce and smooth out high-frequency noise with a quadratic spline: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import qspline1d, qspline1d_eval >>> rng = np.random.default_rng() >>> sig = np.repeat([0., 1., 0.], 100) >>> sig += rng.standard_normal(len(sig))*0.05 # add noise >>> time = np.linspace(0, len(sig)) >>> filtered = qspline1d_eval(qspline1d(sig), time) >>> plt.plot(sig, label=\"signal\") >>> plt.plot(time, filtered, label=\"filtered\") >>> plt.legend() >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_spline_filters.py", + "ast_data": "FunctionDef name:qspline1d arg:signal arg:lamb arguments arg arg Assign Call If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_create_grad_send_info", + "source_code": "def _create_grad_send_info(self, args_recv_info: tuple) -> list[Optional[int]]:\n grad_send_info: list[Optional[int]] = []\n\n def map_recv_to_send(a):\n if isinstance(a, _RecvInfo):\n grad_send_info.append(a.source)\n return a.source\n else:\n grad_send_info.append(None)\n return None\n map_aggregate(args_recv_info, map_recv_to_send)\n logger.debug('%s Grad send info: %s', self.log_prefix, grad_send_info)\n return grad_send_info", + "docstring": "Create a list of stage indices to send gradients to.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", + "ast_data": "FunctionDef name:_create_grad_send_info arg:self arg:args_recv_info arguments arg arg FunctionDef name:map_recv_to_send arg:a arguments arg If Call Call Return return:yes Call Return return:no Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "Future", + "source_code": "class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):\n\n def __init__(self, *, devices: Optional[list[Union[int, str, torch.device]]]=None):\n if devices is None:\n devices = []\n super().__init__([torch.device(d) for d in devices])\n\n def done(self) -> bool:\n return super().done()\n\n def wait(self) -> T:\n return super().wait()\n\n def value(self) -> T:\n return super().value()\n\n def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:\n return cast(Future[S], super().then(callback))\n\n def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:\n super().add_done_callback(callback)\n\n def set_result(self, result: T) -> None:\n super().set_result(result)\n\n def set_exception(self, result: T) -> None:\n assert isinstance(result, Exception), f'{result} is of type {type(result)}, not an Exception.'\n\n def raise_error(fut_result):\n raise fut_result\n super()._set_unwrap_func(raise_error)\n self.set_result(result)", + "docstring": "Wrapper around a `~torch.distributed.rpc.rpc_async`. It also exposes a set of APIs to add callback functions and set results. .. 
warning:: GPU support is a beta feature, subject to changes.", + "type": "class", + "file_path": "pytorch\\torch\\futures\\__init__.py", + "ast_data": "ClassDef name:Future FunctionDef name:__init__ arg:self arguments arg arg If Compare Assign Call Call Call FunctionDef name:done arg:self arguments arg Return return:yes Call Call FunctionDef name:wait arg:self arguments arg Return return:yes Call Call FunctionDef name:value arg:self arguments arg Return return:yes Call Call FunctionDef name:then arg:self arg:callback arguments arg arg Return return:yes Call Call Call FunctionDef name:add_done_callback arg:self arg:callback arguments arg arg Call Call FunctionDef name:set_result arg:self arg:result arguments arg arg Call Call FunctionDef name:set_exception arg:self arg:result arguments arg arg Call Call FunctionDef name:raise_error arg:fut_result arguments arg Raise Call Call Call" + }, + { + "library": "matplotlib", + "name": "get_tick_out", + "source_code": "def get_tick_out(self):\n return self._tick_out", + "docstring": "Return whether ticks are drawn inside or outside the axes.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py", + "ast_data": "FunctionDef name:get_tick_out arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "set_jac_params", + "source_code": "def set_jac_params(self, *args):\n self.jac_params = args\n return self", + "docstring": "Set extra parameters for user-supplied function jac.", + "type": "method", + "file_path": "scipy\\scipy\\integrate\\_ode.py", + "ast_data": "FunctionDef name:set_jac_params arg:self arguments arg arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "iter_sequence_infinite", + "source_code": "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", + "docstring": "Iterates indefinitely over a Sequence. Args: seq: instance. Yields: Batches of data from the .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", + "ast_data": "FunctionDef name:iter_sequence_infinite arg:seq arguments arg While For" + }, + { + "library": "matplotlib", + "name": "set_linelength", + "source_code": "def set_linelength(self, linelength):\n if linelength == self.get_linelength():\n return\n lineoffset = self.get_lineoffset()\n segments = self.get_segments()\n pos = 1 if self.is_horizontal() else 0\n for segment in segments:\n segment[0, pos] = lineoffset + linelength / 2.0\n segment[1, pos] = lineoffset - linelength / 2.0\n self.set_segments(segments)\n self._linelength = linelength", + "docstring": "Set the length of the lines used to mark each event.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:set_linelength arg:self arg:linelength arguments arg arg If Compare Call Return return:no Assign Call Assign Call Assign Call For Assign Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "shape", + "source_code": "@property\ndef shape(self):\n return self._shape()", + "docstring": "of this . If this operator acts like the batch matrix with , then this returns , equivalent to . 
Returns: , statically determined, may be undefined.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "_calculate_shapes", + "source_code": "def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):\n return [broadcast_shape + tuple((dim_sizes[dim] for dim in core_dims)) for core_dims in list_of_core_dims]", + "docstring": "Helper for calculating broadcast shapes with core dimensions.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", + "ast_data": "FunctionDef name:_calculate_shapes arg:broadcast_shape arg:dim_sizes arg:list_of_core_dims arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "GridSpecFromSubplotSpec", + "source_code": "class GridSpecFromSubplotSpec(GridSpecBase):\n\n def __init__(self, nrows, ncols, subplot_spec, wspace=None, hspace=None, height_ratios=None, width_ratios=None):\n self._wspace = wspace\n self._hspace = hspace\n if isinstance(subplot_spec, SubplotSpec):\n self._subplot_spec = subplot_spec\n else:\n raise TypeError('subplot_spec must be type SubplotSpec, usually from GridSpec, or axes.get_subplotspec.')\n self.figure = self._subplot_spec.get_gridspec().figure\n super().__init__(nrows, ncols, width_ratios=width_ratios, height_ratios=height_ratios)\n\n def get_subplot_params(self, figure=None):\n hspace = self._hspace if self._hspace is not None else figure.subplotpars.hspace if figure is not None else mpl.rcParams['figure.subplot.hspace']\n wspace = self._wspace if self._wspace is not None else figure.subplotpars.wspace if figure is not None else mpl.rcParams['figure.subplot.wspace']\n figbox = self._subplot_spec.get_position(figure)\n left, bottom, right, top = figbox.extents\n return SubplotParams(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)\n\n def get_topmost_subplotspec(self):\n return self._subplot_spec.get_topmost_subplotspec()", + "docstring": "GridSpec whose subplot layout parameters are inherited from the location specified by a given SubplotSpec.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", + "ast_data": "ClassDef name:GridSpecFromSubplotSpec FunctionDef name:__init__ arg:self arg:nrows arg:ncols arg:subplot_spec arg:wspace arg:hspace arg:height_ratios arg:width_ratios arguments arg arg arg arg arg arg arg arg Assign Assign If Call Assign Raise Call Assign Call Call Call FunctionDef name:get_subplot_params arg:self arg:figure arguments arg arg Assign Compare Compare Assign Compare Compare Assign Call Assign Return return:yes Call FunctionDef name:get_topmost_subplotspec arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_kl_gamma_gamma", + "source_code": "@kullback_leibler.RegisterKL(Gamma, Gamma)\ndef _kl_gamma_gamma(g0, g1, name=None):\n with ops.name_scope(name, 'kl_gamma_gamma', values=[g0.concentration, g0.rate, g1.concentration, g1.rate]):\n return (g0.concentration - g1.concentration) * math_ops.digamma(g0.concentration) + math_ops.lgamma(g1.concentration) - math_ops.lgamma(g0.concentration) + g1.concentration * math_ops.log(g0.rate) - g1.concentration * math_ops.log(g1.rate) + g0.concentration * (g1.rate / g0.rate - 1.0)", + "docstring": "Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma. Args: g0: instance of a Gamma distribution object. 
g1: instance of a Gamma distribution object. name: (optional) Name to use for created operations. Default is \"kl_gamma_gamma\". Returns: kl_gamma_gamma: . The batchwise KL(g0 || g1).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\gamma.py", + "ast_data": "FunctionDef name:_kl_gamma_gamma arg:g0 arg:g1 arg:name arguments arg arg arg With Call Return return:yes Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "load_state_dict", + "source_code": "@override\ndef load_state_dict(self, state_dict: dict[str, Any]) -> None:\n lr_lambdas = state_dict.pop('lr_lambdas')\n self.__dict__.update(state_dict)\n state_dict['lr_lambdas'] = lr_lambdas\n for idx, fn in enumerate(lr_lambdas):\n if fn is not None:\n self.lr_lambdas[idx].__dict__.update(fn)", + "docstring": "Load the scheduler's state. When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Assign Call Call Assign For Call If Compare Call" + }, + { + "library": "tensorflow", + "name": "reduce_max", + "source_code": "@tf_export('math.reduce_max', 'reduce_max', v1=[])\n@dispatch.add_dispatch_support\ndef reduce_max(input_tensor, axis=None, keepdims=False, name=None):\n return reduce_max_with_dims(input_tensor, axis, keepdims, name, _ReductionDims(input_tensor, axis))", + "docstring": "Computes of elements across dimensions of a tensor. This is the reduction operation for the elementwise op. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If is None, all dimensions are reduced, and a tensor with a single element is returned. Usage example: >>> x = tf.constant([5, 1, 2, 4]) >>> tf.reduce_max(x) >>> x = tf.constant([-5, -1, -2, -4]) >>> tf.reduce_max(x) >>> x = tf.constant([4, float('nan')]) >>> tf.reduce_max(x) >>> x = tf.constant([float('nan'), float('nan')]) >>> tf.reduce_max(x) >>> x = tf.constant([float('-inf'), float('inf')]) >>> tf.reduce_max(x) See the numpy docs for and behavior. Args: input_tensor: The tensor to reduce. Should have real numeric type. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:reduce_max arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "mminfo", + "source_code": "def mminfo(source):\n cursor, stream_to_close = _get_read_cursor(source, 1)\n h = cursor.header\n cursor.close()\n if stream_to_close:\n stream_to_close.close()\n return (h.nrows, h.ncols, h.nnz, h.format, h.field, h.symmetry)", + "docstring": "Return size and storage parameters from Matrix Market file-like 'source'. Parameters ---------- source : str or file-like Matrix Market filename (extension .mtx) or open file-like object Returns ------- rows : int Number of matrix rows. cols : int Number of matrix columns. 
entries : int Number of non-zero entries of a sparse matrix or rows*cols for a dense matrix. format : str Either 'coordinate' or 'array'. field : str Either 'real', 'complex', 'pattern', or 'integer'. symmetry : str Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. Notes ----- .. versionchanged:: 1.12.0 C++ implementation. Examples -------- >>> from io import StringIO >>> from scipy.io import mminfo >>> text = '''%%MatrixMarket matrix coordinate real general ... 5 5 7 ... 2 3 1.0 ... 3 4 2.0 ... 3 5 3.0 ... 4 1 4.0 ... 4 2 5.0 ... 4 3 6.0 ... 4 4 7.0 ... ''' `` returns the number of rows, number of columns, format, field type and symmetry attribute of the source file. >>> mminfo(StringIO(text)) (5, 5, 7, 'coordinate', 'real', 'general')", + "type": "function", + "file_path": "scipy\\scipy\\io\\_fast_matrix_market\\__init__.py", + "ast_data": "FunctionDef name:mminfo arg:source arguments arg Assign Call Assign Call If Call Return return:yes" + }, + { + "library": "scipy", + "name": "PowerResult", + "source_code": "@dataclass\nclass PowerResult:\n power: float | np.ndarray\n pvalues: float | np.ndarray", + "docstring": "Result object returned by . Attributes ---------- power : float or ndarray The estimated power. pvalues : float or ndarray The simulated p-values.", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_resampling.py", + "ast_data": "ClassDef name:PowerResult" + }, + { + "library": "pandas", + "name": "_check_object_for_strings", + "source_code": "def _check_object_for_strings(values: np.ndarray) -> str:\n ndtype = values.dtype.name\n if ndtype == 'object':\n if lib.is_string_array(values, skipna=False):\n ndtype = 'string'\n return ndtype", + "docstring": "Check if we can use string hashtable instead of object hashtable. Parameters ---------- values : ndarray Returns ------- str", + "type": "function", + "file_path": "pandas\\pandas\\core\\algorithms.py", + "ast_data": "FunctionDef name:_check_object_for_strings arg:values arguments arg Assign If Compare If Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_load_csv", + "source_code": "def _load_csv(self, filename, format='compute_only'):\n assert format == 'compute_only'\n with open(filename, newline='') as csvfile:\n reader = csv.reader(csvfile)\n for rank, row in enumerate(reader):\n self.pipeline_order[rank] = [_Action.from_str(s) for s in row]\n self._validate_and_set_stage_mapping(self.pipeline_order)", + "docstring": "Load a CSV representation of the schedule from a file with the provided filename. This API will most likely get renamed/refactored so is marked as internal for now. 
format must be \"compute_only\" for PipelineScheduleMulti.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py", + "ast_data": "FunctionDef name:_load_csv arg:self arg:filename arg:format arguments arg arg arg Compare With Call Assign Call For Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "query_key_value_clones", + "source_code": "def query_key_value_clones(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, dtype: torch.dtype=None):\n if dtype is None:\n dtype = query.dtype\n query_ref = query.clone().detach().to(dtype).requires_grad_(query.requires_grad)\n key_ref = key.clone().detach().to(dtype).requires_grad_(key.requires_grad)\n value_ref = value.clone().detach().to(dtype).requires_grad_(value.requires_grad)\n return (query_ref, key_ref, value_ref)", + "docstring": "Clones the query, key, and value tensors and moves them to the specified dtype.", + "type": "function", + "file_path": "pytorch\\benchmarks\\transformer\\score_mod.py", + "ast_data": "FunctionDef name:query_key_value_clones arg:query arg:key arg:value arg:dtype arguments arg arg arg arg If Compare Assign Assign Call Call Call Call Assign Call Call Call Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_proto", + "source_code": "@staticmethod\ndef from_proto(queue_runner_def, import_scope=None):\n return QueueRunner(queue_runner_def=queue_runner_def, import_scope=import_scope)", + "docstring": "Returns a object created from .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py", + "ast_data": "FunctionDef name:from_proto arg:queue_runner_def arg:import_scope arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "adaptive_max_pool2d_with_indices", + "source_code": "def adaptive_max_pool2d_with_indices(input: Tensor, output_size: BroadcastingList2[int], return_indices: bool=False) -> tuple[Tensor, Tensor]:\n if has_torch_function_unary(input):\n return handle_torch_function(adaptive_max_pool2d_with_indices, (input,), input, output_size, return_indices=return_indices)\n output_size = _list_with_default(output_size, input.size())\n return torch._C._nn.adaptive_max_pool2d(input, output_size)", + "docstring": "adaptive_max_pool2d(input, output_size, return_indices=False) Applies a 2D adaptive max pooling over an input signal composed of several input planes. See :class: for details and output shape. Args: output_size: the target output size (single integer or double-integer tuple) return_indices: whether to return pooling indices. Default: ``", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:adaptive_max_pool2d_with_indices arg:input arg:output_size arg:return_indices arguments arg arg arg If Call Return return:yes Call Assign Call Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "layer", + "source_code": "@property\ndef layer(self):\n return self._layer", + "docstring": "Layer property can only be set before the sprite is added to a group, after that it is read only and a sprite's layer in a group should be set via the group's change_layer() method. 
Overwrites dynamic property from sprite class for speed.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:layer arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "list_options", + "source_code": "def list_options() -> list[str]:\n from torch._inductor import config\n current_config: dict[str, Any] = config.get_config_copy()\n return list(current_config.keys())", + "docstring": "Returns a dictionary describing the optimizations and debug configurations that are available to . The options are documented in . Example:: >>> torch._inductor.list_options()", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\__init__.py", + "ast_data": "FunctionDef name:list_options arguments Call Return return:yes Call Call" + }, + { + "library": "cryptography", + "name": "public_bytes_raw", + "source_code": "@abc.abstractmethod\ndef public_bytes_raw(self) -> bytes:\n pass", + "docstring": "The raw bytes of the public key. Equivalent to public_bytes(Raw, Raw).", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py", + "ast_data": "FunctionDef name:public_bytes_raw arg:self arguments arg" + }, + { + "library": "pandas", + "name": "dtype", + "source_code": "@property\ndef dtype(self) -> ArrowDtype:\n return self._dtype", + "docstring": "An instance of 'ExtensionDtype'.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_sparse_csr_where", + "source_code": "def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:\n return _sparse_coo_where(mask.to_sparse_coo(), input.to_sparse_coo(), fill_value).to_sparse_csr()", + "docstring": "Sparse variant of torch.where. 
Supports sparse CSR tensors.", + "type": "function", + "file_path": "pytorch\\torch\\masked\\_ops.py", + "ast_data": "FunctionDef name:_sparse_csr_where arg:mask arg:input arg:fill_value arguments arg arg arg Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "getmro", + "source_code": "def getmro(cls):\n return _inspect.getmro(cls)", + "docstring": "TFDecorator-aware replacement for inspect.getmro.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:getmro arg:cls arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_DefaultDistributionStrategyV1", + "source_code": "class _DefaultDistributionStrategyV1(StrategyV1):\n\n def __init__(self):\n if not _creating_default_strategy_singleton:\n raise RuntimeError('Should only create a single instance of _DefaultDistributionStrategy')\n super(_DefaultDistributionStrategyV1, self).__init__(_DefaultDistributionExtended(self))\n\n def __deepcopy__(self, memo):\n del memo\n raise RuntimeError('Should only create a single instance of _DefaultDistributionStrategy')", + "docstring": "Default if none is explicitly selected.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "ClassDef name:_DefaultDistributionStrategyV1 FunctionDef name:__init__ arg:self arguments arg If Raise Call Call Call Call FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg Raise Call" + }, + { + "library": "numpy", + "name": "_ezclump", + "source_code": "def _ezclump(mask):\n if mask.ndim > 1:\n mask = mask.ravel()\n idx = (mask[1:] ^ mask[:-1]).nonzero()\n idx = idx[0] + 1\n if mask[0]:\n if len(idx) == 0:\n return [slice(0, mask.size)]\n r = [slice(0, idx[0])]\n r.extend((slice(left, right) for left, right in zip(idx[1:-1:2], idx[2::2])))\n else:\n if len(idx) == 0:\n return []\n r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]\n if mask[-1]:\n r.append(slice(idx[-1], mask.size))\n return r", + "docstring": "Finds the clumps (groups of data with the same values) for a 1D bool array. Returns a series of slices.", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:_ezclump arg:mask arguments arg If Compare Assign Call Assign Call Assign If If Compare Call Return return:yes Call Assign Call Call Call Call If Compare Call Return return:no Assign Call Call If Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "round", + "source_code": "def round(self, decimals: int=0, *args, **kwargs) -> Self:\n return type(self)(pc.round(self._pa_array, ndigits=decimals))", + "docstring": "Round each value in the array a to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect. Returns ------- ArrowExtensionArray Rounded values of the ArrowExtensionArray. See Also -------- DataFrame.round : Round values of a DataFrame. 
Series.round : Round values of a Series.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:round arg:self arg:decimals arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "CCompiler_find_executables", + "source_code": "def CCompiler_find_executables(self):\n pass", + "docstring": "Does nothing here, but is called by the get_version method and can be overridden by subclasses. In particular it is redefined in the class where more documentation can be found.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\ccompiler.py", + "ast_data": "FunctionDef name:CCompiler_find_executables arg:self arguments arg" + }, + { + "library": "matplotlib", + "name": "_wait_cursor_for_draw_cm", + "source_code": "@contextmanager\ndef _wait_cursor_for_draw_cm(self):\n self._draw_time, last_draw_time = (time.time(), getattr(self, '_draw_time', -np.inf))\n if self._draw_time - last_draw_time > 1:\n try:\n self.canvas.set_cursor(tools.Cursors.WAIT)\n yield\n finally:\n self.canvas.set_cursor(self._last_cursor)\n else:\n yield", + "docstring": "Set the cursor to a wait cursor when drawing the canvas. In order to avoid constantly changing the cursor when the canvas changes frequently, do nothing if this context was triggered during the last second. (Optimally we'd prefer only setting the wait cursor if the *current* draw takes too long, but the current draw blocks the GUI thread).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:_wait_cursor_for_draw_cm arg:self arguments arg Assign Call Call If Compare Try Call Call" + }, + { + "library": "scipy", + "name": "pascal", + "source_code": "def pascal(n, kind='symmetric', exact=True):\n from scipy.special import comb\n if kind not in ['symmetric', 'lower', 'upper']:\n raise ValueError(\"kind must be 'symmetric', 'lower', or 'upper'\")\n if exact:\n if n >= 35:\n L_n = np.empty((n, n), dtype=object)\n L_n.fill(0)\n else:\n L_n = np.zeros((n, n), dtype=np.uint64)\n for i in range(n):\n for j in range(i + 1):\n L_n[i, j] = comb(i, j, exact=True)\n else:\n L_n = comb(*np.ogrid[:n, :n])\n if kind == 'lower':\n p = L_n\n elif kind == 'upper':\n p = L_n.T\n else:\n p = np.dot(L_n, L_n.T)\n return p", + "docstring": "Returns the n x n Pascal matrix. The Pascal matrix is a matrix containing the binomial coefficients as its elements. Parameters ---------- n : int The size of the matrix to create; that is, the result is an n x n matrix. kind : str, optional Must be one of 'symmetric', 'lower', or 'upper'. Default is 'symmetric'. 
exact : bool, optional If is True, the result is either an array of type numpy.uint64 (if n >> from scipy.linalg import pascal >>> pascal(4) array([[ 1, 1, 1, 1], [ 1, 2, 3, 4], [ 1, 3, 6, 10], [ 1, 4, 10, 20]], dtype=uint64) >>> pascal(4, kind='lower') array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 2, 1, 0], [1, 3, 3, 1]], dtype=uint64) >>> pascal(50)[-1, -1] 25477612258980856902730428600 >>> from scipy.special import comb >>> comb(98, 49, exact=True) 25477612258980856902730428600", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_special_matrices.py", + "ast_data": "FunctionDef name:pascal arg:n arg:kind arg:exact arguments arg arg arg If Compare Raise Call If If Compare Assign Call Call Assign Call For Call For Call Assign Call Assign Call If Compare Assign If Compare Assign Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "get_instances", + "source_code": "def get_instances(cls):\n return [x for x in gc.get_objects() if isinstance(x, cls)]", + "docstring": "Return GC instances.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\gctools.py", + "ast_data": "FunctionDef name:get_instances arg:cls arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_text", + "source_code": "def get_text(self):\n return self._text", + "docstring": "Return the cell instance.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:get_text arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "serialize", + "source_code": "@abc.abstractmethod\ndef serialize(self):\n pass", + "docstring": "Callback to serialize the object. Returns a string.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\python_state.py", + "ast_data": "FunctionDef name:serialize arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "_prepare_exported_program_for_export", + "source_code": "def _prepare_exported_program_for_export(exported_program: torch.export.ExportedProgram, *, registry: _registration.ONNXRegistry) -> torch.export.ExportedProgram:\n exported_program = _fx_passes.decompose_with_registry(exported_program, registry)\n graph_module = exported_program.graph_module\n _fx_passes.insert_type_promotion_nodes(graph_module)\n graph_module = _fx_passes.remove_assertion_nodes(graph_module)\n exported_program._graph_module = graph_module\n return exported_program", + "docstring": "Decompose and apply pre-export transformations to the exported program.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py", + "ast_data": "FunctionDef name:_prepare_exported_program_for_export arg:exported_program arguments arg arg Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_patch_raw_predict", + "source_code": "@contextmanager\ndef _patch_raw_predict(estimator, raw_predictions):\n orig_raw_predict = estimator._raw_predict\n\n def _patched_raw_predicts(*args, **kwargs):\n return raw_predictions\n estimator._raw_predict = _patched_raw_predicts\n yield estimator\n estimator._raw_predict = orig_raw_predict", + "docstring": "Context manager that patches _raw_predict to return raw_predictions. is typically a precomputed array to avoid redundant state-wise computations fitting with early stopping enabled: in this case is incrementally updated whenever we add a tree to the boosted ensemble. 
Note: this makes fitting HistGradientBoosting* models inherently non thread safe at fit time. However thread-safety at fit time was never guaranteed nor enforced for scikit-learn estimators in general. Thread-safety at prediction/transform time is another matter as those operations are typically side-effect free and therefore often thread-safe by default for most scikit-learn models and would like to keep it that way. Therefore this context manager should only be used at fit time. TODO: in the future, we could explore the possibility to extend the scorer public API to expose a way to compute vales from raw predictions. That would probably require also making the scorer aware of the inverse link function used by the estimator which is typically private API for now, hence the need for this patching mechanism.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:_patch_raw_predict arg:estimator arg:raw_predictions arguments arg arg Assign FunctionDef name:_patched_raw_predicts arguments arg arg Return return:yes Assign Assign" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "@torch.no_grad()\ndef step(self, closure=None):\n self._cuda_graph_capture_health_check()\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n for group in self.param_groups:\n params_with_grad: list[Tensor] = []\n grads: list[Tensor] = []\n row_vars: list[Optional[Tensor]] = []\n col_vars: list[Optional[Tensor]] = []\n variances: list[Optional[Tensor]] = []\n state_steps: list[Tensor] = []\n eps1, eps2 = group['eps']\n has_complex = self._init_group(group, params_with_grad, grads, row_vars, col_vars, variances, state_steps)\n adafactor(params_with_grad, grads, row_vars, col_vars, variances, state_steps, d=group['d'], lr=group['lr'], beta2_decay=group['beta2_decay'], weight_decay=group['weight_decay'], eps1=eps1, eps2=eps2, foreach=group['foreach'], maximize=group['maximize'], grad_scale=getattr(self, 'grad_scale', None), found_inf=getattr(self, 'found_inf', None), has_complex=has_complex)\n return loss", + "docstring": "Perform a single optimization step. 
Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\_adafactor.py", + "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "sequence_length_from_sparse_tensor", + "source_code": "def sequence_length_from_sparse_tensor(sp_tensor, num_elements=1):\n with ops.name_scope(None, 'sequence_length') as name_scope:\n row_ids = sp_tensor.indices[:, 0]\n column_ids = sp_tensor.indices[:, 1]\n column_ids += array_ops.ones_like(column_ids)\n seq_length = math_ops.segment_max(column_ids, segment_ids=row_ids)\n seq_length = math_ops.cast(math_ops.ceil(seq_length / num_elements), dtypes.int64)\n n_pad = array_ops.shape(sp_tensor)[:1] - array_ops.shape(seq_length)[:1]\n padding = array_ops.zeros(n_pad, dtype=seq_length.dtype)\n return array_ops.concat([seq_length, padding], axis=0, name=name_scope)", + "docstring": "Returns a [batch_size] Tensor with per-example sequence length.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\utils.py", + "ast_data": "FunctionDef name:sequence_length_from_sparse_tensor arg:sp_tensor arg:num_elements arguments arg arg With Call Assign Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "from_tensors", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef from_tensors(self, tensors):\n return super().from_tensors(tensors)", + "docstring": "See tf.types.experimental.TraceType base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", + "ast_data": "FunctionDef name:from_tensors arg:self arg:tensors arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "EvalOutput", + "source_code": "class EvalOutput(_SupervisedOutput):\n\n def _get_signature_def_fn(self):\n return signature_def_utils.supervised_eval_signature_def", + "docstring": "Represents the output of a supervised eval process. This class generates the appropriate signature def for exporting eval output by type-checking and wrapping loss, predictions, and metrics values.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py", + "ast_data": "ClassDef name:EvalOutput FunctionDef name:_get_signature_def_fn arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_AutoShardDataset", + "source_code": "class _AutoShardDataset(dataset_ops.UnaryDataset):\n\n def __init__(self, input_dataset, num_workers, index, num_replicas=None):\n self._input_dataset = input_dataset\n self._element_spec = input_dataset.element_spec\n variant_tensor = ged_ops.auto_shard_dataset(self._input_dataset._variant_tensor, num_workers=num_workers, index=index, auto_shard_policy=int(input_dataset.options().experimental_distribute.auto_shard_policy), num_replicas=num_replicas, **self._flat_structure)\n super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._element_spec", + "docstring": "A that shards the automatically. This dataset takes in an existing dataset and tries to automatically figure out how to shard the dataset in a multi-worker scenario using graph rewrites. 
If the AutoShardPolicy is set to FILE, it walks up the dataset graph until it finds a reader dataset, then inserts a ShardDataset op before that node so that each worker only sees some files. If the AutoShardPolicy is set to DATA, it inserts a ShardDataset op at the end of the input pipeline, before any terminal PrefetchDataset if there is one. Additionally, if there is a RebatchDatasetV2 in the input pipeline, it is written to legacy RebatchDataset for correctness reasons, since RebatchDatasetV2 is incompatible with data sharding. If the AutoShardPolicy is set to AUTO, it tries to do file-based sharding. If it cannot find a reader dataset, it falls back to doing data-based sharding. If the AutoShardPolicy is set to OFF, it does nothing. Attributes: num_workers: Total number of workers to shard this dataset across. index: The current worker index (out of the total number of workers) this dataset is for. num_replicas: The total number of replicas across all workers. This is used only when sharding by data (either DATA or AUTO) in order to rewrite RebatchDatasetV2 to RebatchDataset. Raises: NotFoundError: If we cannot find a suitable reader dataset to begin automatically sharding the dataset.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\distribute.py", + "ast_data": "ClassDef name:_AutoShardDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:num_workers arg:index arg:num_replicas arguments arg arg arg arg arg Assign Assign Assign Call Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "gather", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef gather(reference, indices):\n return array_ops.gather(reference, indices)", + "docstring": "Retrieves the elements of indices in the tensor . Args: reference: A tensor. indices: An integer tensor of indices. Returns: A tensor of same type as . 
Examples: >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]]) >>> tf.keras.backend.eval(var) array([[1., 2., 3.], [4., 5., 6.]], dtype=float32) >>> var_gathered = tf.keras.backend.gather(var, [0]) >>> tf.keras.backend.eval(var_gathered) array([[1., 2., 3.]], dtype=float32) >>> var_gathered = tf.keras.backend.gather(var, [1]) >>> tf.keras.backend.eval(var_gathered) array([[4., 5., 6.]], dtype=float32) >>> var_gathered = tf.keras.backend.gather(var, [0,1,0]) >>> tf.keras.backend.eval(var_gathered) array([[1., 2., 3.], [4., 5., 6.], [1., 2., 3.]], dtype=float32)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:gather arg:reference arg:indices arguments arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "_visit_sig_parameter_list", + "source_code": "def _visit_sig_parameter_list(self, node: Element, parameter_group: type[Element], sig_open_paren: str, sig_close_paren: str) -> None:\n self.body.append(f'{sig_open_paren}')\n self.is_first_param = True\n self.optional_param_level = 0\n self.params_left_at_level = 0\n self.param_group_index = 0\n self.list_is_required_param = [isinstance(c, parameter_group) for c in node.children]\n self.required_params_left = sum(self.list_is_required_param)\n self.param_separator = node.child_text_separator\n self.multi_line_parameter_list = node.get('multi_line_parameter_list', False)\n self.trailing_comma = node.get('multi_line_trailing_comma', False)\n if self.multi_line_parameter_list:\n self.body.append('\\n\\n')\n self.body.append(self.starttag(node, 'dl'))\n self.param_separator = self.param_separator.rstrip()\n self.context.append(sig_close_paren)", + "docstring": "Visit a signature parameters or type parameters list. The *parameter_group* value is the type of child nodes acting as required parameters or as a set of contiguous optional parameters.", + "type": "method", + "file_path": "sphinx\\sphinx\\writers\\html5.py", + "ast_data": "FunctionDef name:_visit_sig_parameter_list arg:self arg:node arg:parameter_group arg:sig_open_paren arg:sig_close_paren arguments arg arg arg arg arg Call Assign Assign Assign Assign Assign Call Assign Call Assign Assign Call Assign Call If Call Call Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "root_mean_squared_error", + "source_code": "@tf_export(v1=['metrics.root_mean_squared_error'])\ndef root_mean_squared_error(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None):\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.root_mean_squared_error is not supported when eager execution is enabled.')\n predictions, labels, weights = _remove_squeezable_dimensions(predictions=predictions, labels=labels, weights=weights)\n mse, update_mse_op = mean_squared_error(labels, predictions, weights, None, None, name or 'root_mean_squared_error')\n once_across_replicas = lambda _, mse: math_ops.sqrt(mse)\n rmse = _aggregate_across_replicas(metrics_collections, once_across_replicas, mse)\n update_rmse_op = math_ops.sqrt(update_mse_op)\n if updates_collections:\n ops.add_to_collections(updates_collections, update_rmse_op)\n return (rmse, update_rmse_op)", + "docstring": "Computes the root mean squared error between the labels and predictions. The function creates two local variables, and that are used to compute the root mean squared error. 
This average is weighted by , and it is ultimately returned as : an idempotent operation that takes the square root of the division of by . For estimation of the metric over a stream of data, the function creates an operation that updates these variables and returns the . Internally, a operation computes the element-wise square of the difference between and . Then increments with the reduced sum of the product of and , and it increments with the reduced sum of . If is , weights default to 1. Use weights of 0 to mask values. Args: labels: A of the same shape as . predictions: A of arbitrary shape. weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that should be added to. updates_collections: An optional list of collections that should be added to. name: An optional variable_scope name. Returns: root_mean_squared_error: A representing the current mean, the value of divided by . update_op: An operation that increments the and variables appropriately and whose value matches . Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:root_mean_squared_error arg:labels arg:predictions arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg If Call Raise Call Assign Call Assign Call BoolOp Assign arguments arg arg Call Assign Call Assign Call If Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_trackable_children", + "source_code": "def _trackable_children(self, save_type=trackable.SaveType.CHECKPOINT, **kwargs):\n current_graph_non_slot_variables = {}\n current_graph_key = ops.get_default_graph()._graph_key\n for (name, _), variable_object in sorted(self._non_slot_dict.items(), key=lambda item: item[0][0]):\n if context.executing_eagerly() or variable_object._graph_key == current_graph_key:\n current_graph_non_slot_variables[name] = variable_object\n current_graph_non_slot_variables.update(super()._trackable_children(save_type, **kwargs))\n return current_graph_non_slot_variables", + "docstring": "From Trackable. Gather graph-specific non-slot variables to save.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_trackable_children arg:self arg:save_type arguments arg arg arg Assign Assign Call For Call Call arguments arg If BoolOp Call Compare Assign Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n check_is_fitted(self)\n X = validate_data(self, X, reset=False)\n if self.pooling_func == np.mean and (not issparse(X)):\n size = np.bincount(self.labels_)\n n_samples = X.shape[0]\n nX = np.array([np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)])\n else:\n nX = [self.pooling_func(X[:, self.labels_ == l], axis=1) for l in np.unique(self.labels_)]\n nX = np.array(nX).T\n return nX", + "docstring": "Transform a new matrix using the built clustering. 
Parameters ---------- X : array-like of shape (n_samples, n_features) or (n_samples, n_samples) A M by N array of M observations in N dimensions or a length M array of M one-dimensional observations. Returns ------- Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,) The pooled values for each feature cluster.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_feature_agglomeration.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call If BoolOp Compare Call Assign Call Assign Assign Call Call Call Assign Call Compare Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "BiggsExp04", + "source_code": "class BiggsExp04(Benchmark):\n\n def __init__(self, dimensions=4):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0.0] * 4, [20.0] * 4))\n self.global_optimum = [[1.0, 10.0, 1.0, 5.0]]\n self.fglob = 0\n\n def fun(self, x, *args):\n self.nfev += 1\n t = arange(1, 11.0) * 0.1\n y = exp(-t) - 5 * exp(-10 * t)\n vec = (x[2] * exp(-t * x[0]) - x[3] * exp(-t * x[1]) - y) ** 2\n return sum(vec)", + "docstring": "BiggsExp04 objective function. The BiggsExp04 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. math:: \\begin{matrix}\\ f_{\\text{BiggsExp04}}(x) = \\sum_{i=1}^{10} (x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} - y_i)^2\\\\ t_i = 0.1i\\\\ y_i = e^{-t_i} - 5 e^{-10 t_i}\\\\ \\end{matrix} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py", + "ast_data": "ClassDef name:BiggsExp04 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "is_attrs", + "source_code": "@tf_export('__internal__.nest.is_attrs', v1=[])\ndef is_attrs(obj):\n return _is_attrs(obj)", + "docstring": "Returns a true if its input is an instance of an attr.s decorated class.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py", + "ast_data": "FunctionDef name:is_attrs arg:obj arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "fresnel_cos", + "source_code": "@tf_export('math.special.fresnel_cos')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef fresnel_cos(x, name=None):\n with ops.name_scope(name, 'fresnel_cos', [x]):\n return gen_special_math_ops.fresnel_cos(x)", + "docstring": "Computes Fresnel's cosine integral of element-wise. The Fresnel cosine integral is defined as the integral of from to , with the domain of definition all real numbers. The Fresnel cosine integral is odd. >>> tf.math.special.fresnel_cos([-1., -0.1, 0.1, 1.]).numpy() array([-0.7798934 , -0.09999753, 0.09999753, 0.7798934 ], dtype=float32) This implementation is based off of the Cephes math library. Args: x: A or . Must be one of the following types: , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.fresnel second output. 
@end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", + "ast_data": "FunctionDef name:fresnel_cos arg:x arg:name arguments arg arg With Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "pol2cart", + "source_code": "def pol2cart(rho: Tensor, phi: Tensor) -> tuple[Tensor, Tensor]:\n if not isinstance(rho, Tensor) & isinstance(phi, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(rho)}, {type(phi)}')\n x = rho * cos(phi)\n y = rho * sin(phi)\n return (x, y)", + "docstring": "Convert polar coordinates to cartesian coordinates. Args: rho: Tensor of arbitrary shape. phi: Tensor of same arbitrary shape. Returns: - x: Tensor with same shape as input. - y: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi)", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:pol2cart arg:rho arg:phi arguments arg arg If Call Call Raise Call Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_make_axes_method", + "source_code": "def _make_axes_method(func):\n func.__qualname__ = f'Axes.{func.__name__}'\n return func", + "docstring": "Patch the qualname for functions that are directly added to Axes. Some Axes functionality is defined in functions in other submodules. These are simply added as attributes to Axes. As a result, their ` will also show \"Axes.table\". However, since these functions are not intended to be standalone, this is bearable.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:_make_axes_method arg:func arguments arg Assign Return return:yes" + }, + { + "library": "scipy", + "name": "_initial_nodes_b", + "source_code": "def _initial_nodes_b(n, k):\n a = n % 2 - 0.5\n nu = 4.0 * floor(n / 2.0) + 2.0 * a + 2.0\n ak = _specfun.airyzo(k.max(), 1)[0][::-1]\n xksq = nu + 2.0 ** (2.0 / 3.0) * ak * nu ** (1.0 / 3.0) + 1.0 / 5.0 * 2.0 ** (4.0 / 3.0) * ak ** 2 * nu ** (-1.0 / 3.0) + (9.0 / 140.0 - 12.0 / 175.0 * ak ** 3) * nu ** (-1.0) + (16.0 / 1575.0 * ak + 92.0 / 7875.0 * ak ** 4) * 2.0 ** (2.0 / 3.0) * nu ** (-5.0 / 3.0) - (15152.0 / 3031875.0 * ak ** 5 + 1088.0 / 121275.0 * ak ** 2) * 2.0 ** (1.0 / 3.0) * nu ** (-7.0 / 3.0)\n return xksq", + "docstring": "Gatteschi initial guesses Computes an initial approximation to the square of the kth (positive) root :math: of the Hermite polynomial :math: of order :math:. The formula is the one from lemma 3.2 in the original paper. The guesses are accurate in the region just below :math:. 
Parameters ---------- n : int Quadrature order k : ndarray of type int Index of roots to compute Returns ------- xksq : ndarray Square of the approximate root See Also -------- initial_nodes roots_hermite_asy", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:_initial_nodes_b arg:n arg:k arguments arg arg Assign Assign Call Assign Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "real_inputs", + "source_code": "@property\ndef real_inputs(self):\n return _real_inputs._get_handler()", + "docstring": "non-fake example inputs", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\virtualized.py", + "ast_data": "FunctionDef name:real_inputs arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "append", + "source_code": "def append(self, value):\n self._check_external_modification()\n super().append(value)\n self._update_snapshot()", + "docstring": "Add a new trackable value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py", + "ast_data": "FunctionDef name:append arg:self arg:value arguments arg arg Call Call Call Call" + }, + { + "library": "django", + "name": "name", + "source_code": "@property\ndef name(self):\n name = capi.get_field_name(self.ptr)\n return force_str(name, encoding=self._feat.encoding, strings_only=True)", + "docstring": "Return the name of this Field.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\field.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_init_attrs", + "source_code": "def _init_attrs(self, **kwargs):\n attrs = self.__class__.__slots__\n public_attrs = [attr[1:] for attr in attrs]\n invalid_keys = set(kwargs.keys()) - set(public_attrs)\n if invalid_keys:\n raise ValueError(f'found {tuple(invalid_keys)} invalid keyword arguments, please only use {public_attrs}')\n for attr in attrs:\n setattr(self, attr, kwargs.get(attr[1:], None))", + "docstring": "Initialize each attributes with the corresponding keyword arg value or a default of None", + "type": "method", + "file_path": "scipy\\scipy\\io\\_mmio.py", + "ast_data": "FunctionDef name:_init_attrs arg:self arguments arg arg Assign Assign Assign Call Call Call If Raise Call Call For Call Call" + }, + { + "library": "pytorch", + "name": "from_float", + "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n float_modules = [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.ao.nn.intrinsic.modules.fused.LinearReLU, torch.ao.nn.qat.dynamic.Linear]\n assert type(mod) in float_modules, 'nn.quantized.dynamic.Linear.from_float only works for one of' + str([float_mod.__name__ for float_mod in float_modules])\n assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n if type(mod) == nni.LinearReLU:\n mod = mod[0]\n if mod.qconfig is not None and mod.qconfig.weight is not None:\n weight_observer = mod.qconfig.weight()\n else:\n from torch.ao.quantization.qconfig import default_dynamic_qconfig\n weight_observer = default_dynamic_qconfig.weight()\n dtype = weight_observer.dtype\n assert dtype in [torch.qint8, torch.float16], f'The only supported dtypes for dynamic quantized linear are qint8 and float16 got: {dtype}'\n weight_observer(mod.weight)\n if dtype == torch.qint8:\n qweight = _quantize_weight(mod.weight.float(), 
weight_observer)\n elif dtype == torch.float16:\n qweight = mod.weight.float()\n else:\n raise RuntimeError('Unsupported dtype specified for dynamic quantized Linear!')\n qlinear = cls(mod.in_features, mod.out_features, dtype=dtype)\n qlinear.set_weight_bias(qweight, mod.bias)\n return qlinear", + "docstring": "Create a dynamic quantized module from a float module or qparams_dict Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by the user", + "type": "method", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\dynamic\\modules\\linear.py", + "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Assign Compare Call Call Call If Compare Call Assign If BoolOp Compare Compare Assign Call Assign Call Assign Compare Call If Compare Assign Call Call If Compare Assign Call Raise Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "dims", + "source_code": "def dims(*names: str, min: Optional[int]=None, max: Optional[int]=None) -> tuple[Dim, ...]:\n return tuple((Dim(name, min=min, max=max) for name in names))", + "docstring": "Util to create multiple :func: types. Returns: A tuple of :func: types.", + "type": "function", + "file_path": "pytorch\\torch\\export\\dynamic_shapes.py", + "ast_data": "FunctionDef name:dims arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_parse_shard_info_str", + "source_code": "def _parse_shard_info_str(spec: str) -> tuple[list[int], trackable_base.ShardInfo]:\n shape = [int(x) for x in spec.split()[:-1]]\n slices = spec.split()[-1].split(':')\n offset = [int(x.split(',')[0]) for x in slices]\n shard_shape = [int(x.split(',')[1]) for x in slices]\n return (shape, trackable_base.ShardInfo(offset=offset, shape=shard_shape))", + "docstring": "Parses shape and shard_info string.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py", + "ast_data": "FunctionDef name:_parse_shard_info_str arg:spec arguments arg Assign Call Call Assign Call Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "transpose_input", + "source_code": "def transpose_input(from_cudnn):\n order = 'F' if from_cudnn else 'C'\n\n def transform(kernel):\n return kernel.T.reshape(kernel.shape, order=order)\n return transform", + "docstring": "Makes a function that transforms input kernels from/to CuDNN format. It keeps the shape, but changes between the layout (Fortran/C). Eg.: It can be passed to . Args: from_cudnn: if source weights are in CuDNN format, if they're in plain Keras format. 
Returns: Function that converts input kernel to the other format.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py", + "ast_data": "FunctionDef name:transpose_input arg:from_cudnn arguments arg Assign FunctionDef name:transform arg:kernel arguments arg Return return:yes Call Return return:yes" + }, + { + "library": "sphinx", + "name": "set_application", + "source_code": "def set_application(self, app: Sphinx) -> None:\n self._app = app\n self.config = app.config\n self.env = app.env", + "docstring": "set_application will be called from Sphinx to set app and other instance variables :param sphinx.application.Sphinx app: Sphinx application object", + "type": "method", + "file_path": "sphinx\\sphinx\\parsers.py", + "ast_data": "FunctionDef name:set_application arg:self arg:app arguments arg arg Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "set", + "source_code": "def set(self, value):\n pywrap_tfe.TFE_MonitoringStringGaugeCellSet(self._cell, value)", + "docstring": "Atomically set the value. Args: value: string value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", + "ast_data": "FunctionDef name:set arg:self arg:value arguments arg arg Call" + }, + { + "library": "pandas", + "name": "freq", + "source_code": "@property\ndef freq(self) -> BaseOffset:\n return self.dtype.freq", + "docstring": "Return the frequency object for this PeriodArray.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\period.py", + "ast_data": "FunctionDef name:freq arg:self arguments arg Return return:yes" + }, + { + "library": "cherrypy", + "name": "__delitem__", + "source_code": "def __delitem__(self, key):\n if not self.loaded:\n self.load()\n del self._data[key]", + "docstring": "Delete object stored in the session.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:__delitem__ arg:self arg:key arguments arg arg If Call" + }, + { + "library": "scrapy", + "name": "without_none_values", + "source_code": "def without_none_values(iterable: Mapping[_KT, _VT] | Iterable[_KT]) -> dict[_KT, _VT] | Iterable[_KT]:\n if isinstance(iterable, Mapping):\n return {k: v for k, v in iterable.items() if v is not None}\n return type(iterable)((v for v in iterable if v is not None))", + "docstring": "Return a copy of `` have been removed.", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\python.py", + "ast_data": "FunctionDef name:without_none_values arg:iterable arguments arg If Call Return return:yes Call Compare Return return:yes Call Call Compare" + }, + { + "library": "sphinx", + "name": "IndexEntry", + "source_code": "class IndexEntry(NamedTuple):\n name: str\n subtype: int\n docname: str\n anchor: str\n extra: str\n qualifier: str\n descr: str", + "docstring": "An index entry. .. note:: The *qualifier* and *description* are not rendered for some output formats, such as LaTeX.", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\_index.py", + "ast_data": "ClassDef name:IndexEntry" + }, + { + "library": "tensorflow", + "name": "scatter_add", + "source_code": "def scatter_add(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. 
Received arg: {sparse_delta}')\n return self._lazy_read(gen_resource_variable_ops.resource_scatter_add(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", + "docstring": "Adds to this variable. Args: sparse_delta: to be added to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:scatter_add arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "_make_block", + "source_code": "def _make_block(values: ArrayLike, placement: np.ndarray) -> Block:\n dtype = values.dtype\n klass = get_block_type(dtype)\n placement_obj = BlockPlacement(placement)\n if isinstance(dtype, ExtensionDtype) and dtype._supports_2d or isinstance(values, (DatetimeArray, TimedeltaArray)):\n values = ensure_block_shape(values, ndim=2)\n values = maybe_coerce_values(values)\n return klass(values, ndim=2, placement=placement_obj)", + "docstring": "This is an analogue to blocks.new_block(_2d) that ensures: 1) correct dimension for EAs that support 2D (), and 2) correct EA class for datetime64/timedelta64 (). The input is assumed to be either numpy array or ExtensionArray: - In case of a numpy array, it is assumed to already be in the expected shape for Blocks (2D, (cols, rows)). - In case of an ExtensionArray the input can be 1D, also for EAs that are internally stored as 2D. For the rest no preprocessing or validation is done, except for those dtypes that are internally stored as EAs but have an exact numpy equivalent (and at the moment use that numpy dtype), i.e. datetime64/timedelta64.", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\api.py", + "ast_data": "FunctionDef name:_make_block arg:values arg:placement arguments arg arg Assign Assign Call Assign Call If BoolOp BoolOp Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "autocorr", + "source_code": "def autocorr(self, lag: int=1) -> float:\n return self.corr(cast(Series, self.shift(lag)))", + "docstring": "Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. 
>>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:autocorr arg:self arg:lag arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "string_input_producer", + "source_code": "@tf_export(v1=['train.string_input_producer'])\n@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(string_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.')\ndef string_input_producer(string_tensor, num_epochs=None, shuffle=True, seed=None, capacity=32, shared_name=None, name=None, cancel_op=None):\n not_null_err = 'string_input_producer requires a non-null input tensor'\n if not isinstance(string_tensor, tensor_lib.Tensor) and (not string_tensor):\n raise ValueError(not_null_err)\n with ops.name_scope(name, 'input_producer', [string_tensor]) as name:\n string_tensor = ops.convert_to_tensor(string_tensor, dtype=dtypes.string)\n with ops.control_dependencies([control_flow_assert.Assert(math_ops.greater(array_ops.size(string_tensor), 0), [not_null_err])]):\n string_tensor = array_ops.identity(string_tensor)\n return input_producer(input_tensor=string_tensor, element_shape=[], num_epochs=num_epochs, shuffle=shuffle, seed=seed, capacity=capacity, shared_name=shared_name, name=name, summary_name='fraction_of_%d_full' % capacity, cancel_op=cancel_op)", + "docstring": "Output strings (e.g. filenames) to a queue for an input pipeline. Note: if is not , this function creates local counter . Use to initialize local variables. Args: string_tensor: A 1-D string tensor with the strings to produce. num_epochs: An integer (optional). If specified, produces each string from times before generating an error. If not specified, can cycle through the strings in an unlimited number of times. shuffle: Boolean. If true, the strings are randomly shuffled within each epoch. seed: An integer (optional). Seed used if shuffle == True. capacity: An integer. Sets the queue capacity. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. All sessions open to the device which has this queue will be able to access it via the shared_name. Using this in a distributed setting means each name will only be seen by one of the sessions which has access to this operation. name: A name for the operations (optional). cancel_op: Cancel op for the queue (optional). Returns: A queue with the output strings. A for the Queue is added to the current 's collection. Raises: ValueError: If the string_tensor is a null Python list. At runtime, will fail with an assertion if string_tensor becomes a null tensor. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the API to ingest data under eager execution. 
@end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:string_input_producer arg:string_tensor arg:num_epochs arg:shuffle arg:seed arg:capacity arg:shared_name arg:name arg:cancel_op arguments arg arg arg arg arg arg arg arg Assign If BoolOp Call Raise Call With Call Assign Call With Call Call Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "get_figure", + "source_code": "def get_figure(self, root=None):\n if self._root_figure is self:\n return self\n if self._parent is self._root_figure:\n return self._parent\n if root is None:\n message = 'From Matplotlib 3.12 SubFigure.get_figure will by default return the direct parent figure, which may be a SubFigure. To suppress this warning, pass the root parameter. Pass `True` to maintain the old behavior and `False` to opt-in to the future behavior.'\n _api.warn_deprecated('3.10', message=message)\n root = True\n if root:\n return self._root_figure\n return self._parent", + "docstring": "Return the or instance the (Sub)Figure belongs to. Parameters ---------- root : bool, default=True If False, return the (Sub)Figure this artist is on. If True, return the root Figure for a nested tree of SubFigures. .. deprecated:: 3.10 From version 3.12 *root* will default to False.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:get_figure arg:self arg:root arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Assign Call Assign If Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "generate_keras_metadata", + "source_code": "def generate_keras_metadata(saved_nodes, node_paths):\n metadata = saved_metadata_pb2.SavedMetadata()\n for node_id, node in enumerate(saved_nodes):\n if isinstance(node, base_layer.Layer):\n path = node_paths[node]\n if not path:\n node_path = 'root'\n else:\n node_path = 'root.{}'.format('.'.join([ref.name for ref in path]))\n metadata.nodes.add(node_id=node_id, node_path=node_path, version=versions_pb2.VersionDef(producer=1, min_consumer=1, bad_consumers=[]), identifier=node._object_identifier, metadata=node._tracking_metadata)\n return metadata", + "docstring": "Constructs a KerasMetadata proto with the metadata of each keras object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save.py", + "ast_data": "FunctionDef name:generate_keras_metadata arg:saved_nodes arg:node_paths arguments arg arg Assign Call For Call If Call Assign If Assign Assign Call Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "align_ylabels", + "source_code": "def align_ylabels(self, axs=None):\n if axs is None:\n axs = self.axes\n axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]\n for ax in axs:\n _log.debug(' Working on: %s', ax.get_ylabel())\n colspan = ax.get_subplotspec().colspan\n pos = ax.yaxis.get_label_position()\n for axc in axs:\n if axc.yaxis.get_label_position() == pos:\n colspanc = axc.get_subplotspec().colspan\n if pos == 'left' and colspan.start == colspanc.start or (pos == 'right' and colspan.stop == colspanc.stop):\n self._align_label_groups['y'].join(ax, axc)", + "docstring": "Align the ylabels of subplots in the same subplot column if label alignment is being done automatically (i.e. the label position is not manually set). 
Alignment persists for draw events after this is called. If a label is on the left, it is aligned with labels on Axes that also have their label on the left and that have the same left-most subplot column. If the label is on the right, it is aligned with labels on Axes with the same right-most column. Parameters ---------- axs : list of Optional list (or ) of to align the ylabels. Default is to align all Axes on the figure. See Also -------- matplotlib.figure.Figure.align_xlabels matplotlib.figure.Figure.align_titles matplotlib.figure.Figure.align_labels Notes ----- This assumes that all Axes in `.GridSpec.SubplotSpec` positions correspond to figure positions. Examples -------- Example with large yticks labels:: fig, axs = plt.subplots(2, 1) axs[0].plot(np.arange(0, 1000, 50)) axs[0].set_ylabel('YLabel 0') axs[1].set_ylabel('YLabel 1') fig.align_ylabels()", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:align_ylabels arg:self arg:axs arguments arg arg If Compare Assign Assign Call Compare Call For Call Call Assign Call Assign Call For If Compare Call Assign Call If BoolOp BoolOp Compare Compare BoolOp Compare Compare Call" + }, + { + "library": "tensorflow", + "name": "on_test_batch_begin", + "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_test_batch_begin(self, batch, logs=None):\n pass", + "docstring": "Called at the beginning of a batch in methods. Also called at the beginning of a validation batch in the methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of . Typically, the values of the 's metrics are returned. Example: .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:on_test_batch_begin arg:self arg:batch arg:logs arguments arg arg arg" + }, + { + "library": "django", + "name": "_check_prepopulated_fields_value", + "source_code": "def _check_prepopulated_fields_value(self, obj, val, label):\n if not isinstance(val, (list, tuple)):\n return must_be('a list or tuple', option=label, obj=obj, id='admin.E029')\n else:\n return list(chain.from_iterable((self._check_prepopulated_fields_value_item(obj, subfield_name, '%s[%r]' % (label, index)) for index, subfield_name in enumerate(val))))", + "docstring": "Check a value of dictionary, i.e. 
it's an iterable of existing fields.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_prepopulated_fields_value arg:self arg:obj arg:val arg:label arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_maybe_convert_labels", + "source_code": "def _maybe_convert_labels(y_true):\n are_zeros = math_ops.equal(y_true, 0)\n are_ones = math_ops.equal(y_true, 1)\n is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))\n\n def _convert_binary_labels():\n return 2.0 * y_true - 1.0\n updated_y_true = smart_cond.smart_cond(is_binary, _convert_binary_labels, lambda: y_true)\n return updated_y_true", + "docstring": "Converts binary labels into -1/1.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:_maybe_convert_labels arg:y_true arguments arg Assign Call Assign Call Assign Call Call FunctionDef name:_convert_binary_labels arguments Return return:yes Assign Call arguments Return return:yes" + }, + { + "library": "scipy", + "name": "__call__", + "source_code": "def __call__(self, dim=None, seed=None):\n return ortho_group_frozen(dim, seed=seed)", + "docstring": "Create a frozen O(N) distribution. See for more information.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "__enter__", + "source_code": "def __enter__(self):\n from .rcmod import set_palette\n self._orig_palette = color_palette()\n set_palette(self)\n return self", + "docstring": "Open the context.", + "type": "method", + "file_path": "seaborn\\seaborn\\palettes.py", + "ast_data": "FunctionDef name:__enter__ arg:self arguments arg Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "triton_compute_type", + "source_code": "def triton_compute_type(dtype: torch.dtype) -> str:\n return triton_type(upcast_compute_type(dtype))", + "docstring": "Convert torch.dtype to triton type and upcast [b]float16 to float32", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", + "ast_data": "FunctionDef name:triton_compute_type arg:dtype arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_python_properties_internal", + "source_code": "def _python_properties_internal(self):\n metadata = dict(name=self.obj.name, trainable=self.obj.trainable, expects_training_arg=self.obj._expects_training_arg, dtype=policy.serialize(self.obj._dtype_policy), batch_input_shape=getattr(self.obj, '_batch_input_shape', None), stateful=self.obj.stateful, must_restore_from_config=self.obj._must_restore_from_config)\n metadata.update(get_serialized(self.obj))\n if self.obj.input_spec is not None:\n metadata['input_spec'] = nest.map_structure(lambda x: generic_utils.serialize_keras_object(x) if x else None, self.obj.input_spec)\n if self.obj.activity_regularizer is not None and hasattr(self.obj.activity_regularizer, 'get_config'):\n metadata['activity_regularizer'] = generic_utils.serialize_keras_object(self.obj.activity_regularizer)\n if self.obj._build_input_shape is not None:\n metadata['build_input_shape'] = self.obj._build_input_shape\n return metadata", + "docstring": "Returns dictionary of all python properties.", + "type": "method", + "file_path": 
"tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\layer_serialization.py", + "ast_data": "FunctionDef name:_python_properties_internal arg:self arguments arg Assign Call Call Call Call Call If Compare Assign Call arguments arg Call If BoolOp Compare Call Assign Call If Compare Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n check_is_fitted(self, '_n_features_out')\n return _generate_get_feature_names_out(self, self._n_features_out, input_features=input_features)", + "docstring": "Get output feature names for transformation. The feature names out will prefixed by the lowercased class name. For example, if the transformer outputs 3 features, then the feature names out are: . Parameters ---------- input_features : array-like of str or None, default=None Only used to validate feature names with the names seen in . Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None):\n _check_positive_coding(method=self.fit_algorithm, positive=self.positive_code)\n method = 'lasso_' + self.fit_algorithm\n random_state = check_random_state(self.random_state)\n X = validate_data(self, X)\n if self.n_components is None:\n n_components = X.shape[1]\n else:\n n_components = self.n_components\n V, U, E, self.n_iter_ = _dict_learning(X, n_components, alpha=self.alpha, tol=self.tol, max_iter=self.max_iter, method=method, method_max_iter=self.transform_max_iter, n_jobs=self.n_jobs, code_init=self.code_init, dict_init=self.dict_init, callback=self.callback, verbose=self.verbose, random_state=random_state, return_n_iter=True, positive_dict=self.positive_dict, positive_code=self.positive_code)\n self.components_ = U\n self.error_ = E\n return V", + "docstring": "Fit the model from data in X and return the transformed data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. 
Returns ------- V : ndarray of shape (n_samples, n_components) Transformed data.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Call Assign Assign Call Assign Call If Compare Assign Assign Assign Call Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "OneDeviceStrategy", + "source_code": "@tf_export('distribute.OneDeviceStrategy', v1=[])\nclass OneDeviceStrategy(distribute_lib.Strategy):\n\n def __init__(self, device):\n super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device))\n distribute_lib.distribution_strategy_gauge.get_cell('V2').set('OneDeviceStrategy')\n\n def experimental_distribute_dataset(self, dataset, options=None):\n return super(OneDeviceStrategy, self).experimental_distribute_dataset(dataset, options)\n\n def distribute_datasets_from_function(self, dataset_fn, options=None):\n return super(OneDeviceStrategy, self).distribute_datasets_from_function(dataset_fn, options)\n\n def experimental_local_results(self, value):\n return super(OneDeviceStrategy, self).experimental_local_results(value)\n\n def run(self, fn, args=(), kwargs=None, options=None):\n return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)\n\n def reduce(self, reduce_op, value, axis):\n return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)\n\n def scope(self):\n return super(OneDeviceStrategy, self).scope()", + "docstring": "A distribution strategy for running on a single device. Using this strategy will place any variables created in its scope on the specified device. Input distributed through this strategy will be prefetched to the specified device. Moreover, any functions called via will also be placed on the specified device as well. Typical usage of this strategy could be testing your code with the tf.distribute.Strategy API before switching to other strategies which actually distribute to multiple devices/machines. 
For example:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py", + "ast_data": "ClassDef name:OneDeviceStrategy FunctionDef name:__init__ arg:self arg:device arguments arg arg Call Call Call Call Call FunctionDef name:experimental_distribute_dataset arg:self arg:dataset arg:options arguments arg arg arg Return return:yes Call Call FunctionDef name:distribute_datasets_from_function arg:self arg:dataset_fn arg:options arguments arg arg arg Return return:yes Call Call FunctionDef name:experimental_local_results arg:self arg:value arguments arg arg Return return:yes Call Call FunctionDef name:run arg:self arg:fn arg:args arg:kwargs arg:options arguments arg arg arg arg arg Return return:yes Call Call FunctionDef name:reduce arg:self arg:reduce_op arg:value arg:axis arguments arg arg arg arg Return return:yes Call Call FunctionDef name:scope arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "set_job", + "source_code": "def set_job(self, fit_type=None, deriv=None, var_calc=None, del_init=None, restart=None):\n if self.job is None:\n job_l = [0, 0, 0, 0, 0]\n else:\n job_l = [self.job // 10000 % 10, self.job // 1000 % 10, self.job // 100 % 10, self.job // 10 % 10, self.job % 10]\n if fit_type in (0, 1, 2):\n job_l[4] = fit_type\n if deriv in (0, 1, 2, 3):\n job_l[3] = deriv\n if var_calc in (0, 1, 2):\n job_l[2] = var_calc\n if del_init in (0, 1):\n job_l[1] = del_init\n if restart in (0, 1):\n job_l[0] = restart\n self.job = job_l[0] * 10000 + job_l[1] * 1000 + job_l[2] * 100 + job_l[3] * 10 + job_l[4]", + "docstring": "Sets the \"job\" parameter is a hopefully comprehensible way. If an argument is not specified, then the value is left as is. The default value from class initialization is for all of these options set to 0. Parameters ---------- fit_type : {0, 1, 2} int 0 -> explicit ODR 1 -> implicit ODR 2 -> ordinary least-squares deriv : {0, 1, 2, 3} int 0 -> forward finite differences 1 -> central finite differences 2 -> user-supplied derivatives (Jacobians) with results checked by ODRPACK 3 -> user-supplied derivatives, no checking var_calc : {0, 1, 2} int 0 -> calculate asymptotic covariance matrix and fit parameter uncertainties (V_B, s_B) using derivatives recomputed at the final solution 1 -> calculate V_B and s_B using derivatives from last iteration 2 -> do not calculate V_B and s_B del_init : {0, 1} int 0 -> initial input variable offsets set to 0 1 -> initial offsets provided by user in variable \"work\" restart : {0, 1} int 0 -> fit is not a restart 1 -> fit is a restart Notes ----- The permissible values are different from those given on pg. 31 of the ODRPACK User's Guide only in that one cannot specify numbers greater than the last value for each variable. If one does not supply functions to compute the Jacobians, the fitting procedure will change deriv to 0, finite differences, as a default. 
To initialize the input variable offsets by yourself, set del_init to 1 and put the offsets into the \"work\" variable correctly.", + "type": "method", + "file_path": "scipy\\scipy\\odr\\_odrpack.py", + "ast_data": "FunctionDef name:set_job arg:self arg:fit_type arg:deriv arg:var_calc arg:del_init arg:restart arguments arg arg arg arg arg arg If Compare Assign Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Assign" + }, + { + "library": "pandas", + "name": "read_column", + "source_code": "def read_column(self, column: str, where=None, start: int | None=None, stop: int | None=None):\n self.validate_version()\n if not self.infer_axes():\n return False\n if where is not None:\n raise TypeError('read_column does not currently accept a where clause')\n for a in self.axes:\n if column == a.name:\n if not a.is_data_indexable:\n raise ValueError(f'column [{column}] can not be extracted individually; it is not data indexable')\n c = getattr(self.table.cols, column)\n a.set_info(self.info)\n col_values = a.convert(c[start:stop], nan_rep=self.nan_rep, encoding=self.encoding, errors=self.errors)\n cvs = col_values[1]\n dtype = getattr(self.table.attrs, f'{column}_meta', None)\n return Series(cvs, name=column, copy=False, dtype=dtype)\n raise KeyError(f'column [{column}] not found in the table')", + "docstring": "return a single column from the table, generally only indexables are interesting", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:read_column arg:self arg:column arg:where arg:start arg:stop arguments arg arg arg arg arg Call If Call Return return:yes If Compare Raise Call For If Compare If Raise Call Assign Call Call Assign Call Assign Assign Call Return return:yes Call Raise Call" + }, + { + "library": "tensorflow", + "name": "he_normal", + "source_code": "@tf_export(v1=['initializers.he_normal'])\ndef he_normal(seed=None):\n return VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal', seed=seed)", + "docstring": "He normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by where is the number of input units in the weight tensor. Args: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: [He et al., 2015] ( # pylint: disable=line-too-long ([pdf](", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:he_normal arg:seed arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "get_fused_node", + "source_code": "def get_fused_node(self, node: BaseSchedulerNode) -> BaseSchedulerNode:\n return self.name_to_fused_node[node.get_first_name()]", + "docstring": "Look up the node in Scheduler name_to_fused_node", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:get_fused_node arg:self arg:node arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "GeometryCollection", + "source_code": "class GeometryCollection(OGRGeometry):\n\n def __getitem__(self, index):\n if 0 <= index < self.geom_count:\n return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)\n else:\n raise IndexError('Index out of range when accessing geometry in a collection: %s.' 
% index)\n\n def __len__(self):\n return self.geom_count\n\n def add(self, geom):\n if isinstance(geom, OGRGeometry):\n if isinstance(geom, self.__class__):\n for g in geom:\n capi.add_geom(self.ptr, g.ptr)\n else:\n capi.add_geom(self.ptr, geom.ptr)\n elif isinstance(geom, str):\n tmp = OGRGeometry(geom)\n capi.add_geom(self.ptr, tmp.ptr)\n else:\n raise GDALException('Must add an OGRGeometry.')\n\n @property\n def point_count(self):\n return sum((self[i].point_count for i in range(self.geom_count)))\n\n @property\n def tuple(self):\n return tuple((self[i].tuple for i in range(self.geom_count)))\n coords = tuple", + "docstring": "The Geometry Collection class.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "ClassDef name:GeometryCollection FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Compare Return return:yes Call Call Call Raise Call FunctionDef name:__len__ arg:self arguments arg Return return:yes FunctionDef name:add arg:self arg:geom arguments arg arg If Call If Call For Call Call If Call Assign Call Call Raise Call FunctionDef name:point_count arg:self arguments arg Return return:yes Call Call FunctionDef name:tuple arg:self arguments arg Return return:yes Call Call Assign" + }, + { + "library": "tensorflow", + "name": "create_empty_output_dir", + "source_code": "def create_empty_output_dir(output_directory: str, overwrite: bool=True) -> None:\n if overwrite and file_io.file_exists_v2(output_directory):\n logging.info('Deleting existing output directory: %s .', output_directory)\n file_io.delete_recursively_v2(output_directory)\n file_io.recursive_create_dir_v2(output_directory)", + "docstring": "Creates the . If already exists, it recursively deletes all contents inside the directory. Also creates the parent & intermediate directories. Args: output_directory: Output directory. overwrite: Whether to clean the output directory if it exists.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\save_model.py", + "ast_data": "FunctionDef name:create_empty_output_dir arg:output_directory arg:overwrite arguments arg arg If BoolOp Call Call Call Call" + }, + { + "library": "pandas", + "name": "area", + "source_code": "def area(self, x: Hashable | None=None, y: Hashable | None=None, stacked: bool=True, **kwargs) -> PlotAccessor:\n return self(kind='area', x=x, y=y, stacked=stacked, **kwargs)", + "docstring": "Draw a stacked area plot. An area plot displays quantitative data visually. This function wraps the matplotlib area function. Parameters ---------- x : label or position, optional Coordinates for the X axis. By default uses the index. y : label or position, optional Column to plot. By default uses all columns. stacked : bool, default True Area plots are stacked by default. Set to False to create an unstacked plot. **kwargs Additional keyword arguments are documented in :meth:. Returns ------- matplotlib.axes.Axes or numpy.ndarray Area plot, or array of area plots if subplots is True. See Also -------- DataFrame.plot : Make plots of DataFrame using matplotlib. Examples -------- Draw an area plot based on basic business metrics: .. plot:: :context: close-figs >>> df = pd.DataFrame( ... { ... \"sales\": [3, 2, 3, 9, 10, 6], ... \"signups\": [5, 5, 6, 12, 14, 13], ... \"visits\": [20, 42, 28, 62, 81, 50], ... }, ... index=pd.date_range( ... start=\"2018/01/01\", end=\"2018/07/01\", freq=\"ME\" ... ), ... 
) >>> ax = df.plot.area() Area plots are stacked by default. To produce an unstacked plot, pass `x`: .. plot:: :context: close-figs >>> df = pd.DataFrame( ... { ... \"sales\": [3, 2, 3], ... \"visits\": [20, 42, 28], ... \"day\": [1, 2, 3], ... } ... ) >>> ax = df.plot.area(x=\"day\")", + "type": "method", + "file_path": "pandas\\pandas\\plotting\\_core.py", + "ast_data": "FunctionDef name:area arg:self arg:x arg:y arg:stacked arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "assemble_exception_table", + "source_code": "def assemble_exception_table(tab: list[ExceptionTableEntry]) -> bytes:\n b = []\n for entry in tab:\n first_entry = encode_exception_table_varint(entry.start // 2)\n first_entry[0] |= 1 << 7\n b.extend(first_entry)\n length = entry.end - entry.start + 2\n b.extend(encode_exception_table_varint(length // 2))\n b.extend(encode_exception_table_varint(entry.target // 2))\n dl = (entry.depth << 1) + entry.lasti\n b.extend(encode_exception_table_varint(dl))\n return bytes(b)", + "docstring": "Inverse of parse_exception_table - encodes list of exception table entries into bytes.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:assemble_exception_table arg:tab arguments arg Assign For Assign Call Call Assign Call Call Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "phone2numeric", + "source_code": "@keep_lazy_text\ndef phone2numeric(phone):\n char2number = {'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9'}\n return ''.join((char2number.get(c, c) for c in phone.lower()))", + "docstring": "Convert a phone number with letters into its numeric equivalent.", + "type": "function", + "file_path": "django\\django\\utils\\text.py", + "ast_data": "FunctionDef name:phone2numeric arg:phone arguments arg Assign Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "_check_param_device", + "source_code": "def _check_param_device(param: torch.Tensor, old_param_device: Optional[int]) -> int:\n support_device_types = ['cuda', torch._C._get_privateuse1_backend_name()]\n if old_param_device is None:\n old_param_device = param.get_device() if param.device.type in support_device_types else -1\n else:\n warn = False\n if param.device.type in support_device_types:\n warn = param.get_device() != old_param_device\n else:\n warn = old_param_device != -1\n if warn:\n raise TypeError('Found two parameters on different devices, this is currently not supported.')\n return old_param_device", + "docstring": "Check if the parameters are located on the same device. Currently, the conversion between model parameters and single vector form is not supported for multiple allocations, e.g. parameters in different GPUs/PrivateUse1s, or mixture of CPU/GPU/PrivateUse1. Args: param ([Tensor]): a Tensor of a parameter of a model old_param_device (int): the device where the first parameter of a model is allocated. 
Returns: old_param_device (int): report device for the first time", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\convert_parameters.py", + "ast_data": "FunctionDef name:_check_param_device arg:param arg:old_param_device arguments arg arg Assign Call If Compare Assign Compare Call Assign If Compare Assign Compare Call Assign Compare If Raise Call Return return:yes" + }, + { + "library": "django", + "name": "_check_token_format", + "source_code": "def _check_token_format(token):\n if len(token) not in (CSRF_TOKEN_LENGTH, CSRF_SECRET_LENGTH):\n raise InvalidTokenFormat(REASON_INCORRECT_LENGTH)\n if invalid_token_chars_re.search(token):\n raise InvalidTokenFormat(REASON_INVALID_CHARACTERS)", + "docstring": "Raise an InvalidTokenFormat error if the token has an invalid length or characters that aren't allowed. The token argument can be a CSRF cookie secret or non-cookie CSRF token, and either masked or unmasked.", + "type": "function", + "file_path": "django\\django\\middleware\\csrf.py", + "ast_data": "FunctionDef name:_check_token_format arg:token arguments arg If Compare Call Raise Call If Call Raise Call" + }, + { + "library": "scipy", + "name": "_correa_entropy", + "source_code": "def _correa_entropy(X, m, *, xp):\n n = X.shape[-1]\n X = _pad_along_last_axis(X, m, xp=xp)\n i = xp.arange(1, n + 1)\n dj = xp.arange(-m, m + 1)[:, None]\n j = i + dj\n j0 = j + m - 1\n Xibar = xp.mean(X[..., j0], axis=-2, keepdims=True)\n difference = X[..., j0] - Xibar\n num = xp.sum(difference * dj, axis=-2)\n den = n * xp.sum(difference ** 2, axis=-2)\n return -xp.mean(xp.log(num / den), axis=-1)", + "docstring": "Compute the Correa estimator as described in [6].", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_entropy.py", + "ast_data": "FunctionDef name:_correa_entropy arg:X arg:m arguments arg arg arg Assign Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_minimize_trust_ncg", + "source_code": "def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None, **trust_region_options):\n if jac is None:\n raise ValueError('Jacobian is required for Newton-CG trust-region minimization')\n if hess is None and hessp is None:\n raise ValueError('Either the Hessian or the Hessian-vector product is required for Newton-CG trust-region minimization')\n return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=CGSteihaugSubproblem, **trust_region_options)", + "docstring": "Minimization of scalar function of one or more variables using the Newton conjugate gradient trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. 
gtol : float Gradient norm must be less than before successful termination.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_trustregion_ncg.py", + "ast_data": "FunctionDef name:_minimize_trust_ncg arg:fun arg:x0 arg:args arg:jac arg:hess arg:hessp arguments arg arg arg arg arg arg arg If Compare Raise Call If BoolOp Compare Compare Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "as_integer_ratio", + "source_code": "def as_integer_ratio(self) -> tuple['SymInt', builtins.int]:\n return (self, 1)", + "docstring": "Represent this int as an exact integer ratio", + "type": "method", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:as_integer_ratio arg:self arguments arg Return return:yes" + }, + { + "library": "scrapy", + "name": "set_asyncio_event_loop_policy", + "source_code": "def set_asyncio_event_loop_policy() -> None:\n _get_asyncio_event_loop_policy()", + "docstring": "The policy functions from asyncio often behave unexpectedly, so we restrict their use to the absolutely essential case. This should only be used to install the reactor.", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\reactor.py", + "ast_data": "FunctionDef name:set_asyncio_event_loop_policy arguments Call" + }, + { + "library": "sphinx", + "name": "_todim", + "source_code": "def _todim(val: int | str) -> str:\n if val is None:\n return 'initial'\n elif str(val).isdigit():\n return '0' if int(val) == 0 else '%spx' % val\n return val", + "docstring": "Make val a css dimension. In particular the following transformations are performed: - None -> 'initial' (default CSS value) - 0 -> '0' - ints and string representations of ints are interpreted as pixels. Everything else is returned unchanged.", + "type": "function", + "file_path": "sphinx\\sphinx\\jinja2glue.py", + "ast_data": "FunctionDef name:_todim arg:val arguments arg If Compare Return return:yes If Call Call Return return:yes Compare Call Return return:yes" + }, + { + "library": "django", + "name": "check_apps_ready", + "source_code": "def check_apps_ready(self):\n if not self.apps_ready:\n from django.conf import settings\n settings.INSTALLED_APPS\n raise AppRegistryNotReady(\"Apps aren't loaded yet.\")", + "docstring": "Raise an exception if all apps haven't been imported yet.", + "type": "method", + "file_path": "django\\django\\apps\\registry.py", + "ast_data": "FunctionDef name:check_apps_ready arg:self arguments arg If Raise Call" + }, + { + "library": "tensorflow", + "name": "flush", + "source_code": "@tf_export('summary.flush', v1=[])\ndef flush(writer=None, name=None):\n del name\n if writer is None:\n writer = _summary_state.writer\n if writer is None:\n return control_flow_ops.no_op()\n if isinstance(writer, SummaryWriter):\n return writer.flush()\n raise ValueError('Invalid argument to flush(): %r' % (writer,))", + "docstring": "Forces summary writer to send any buffered data to storage. This operation blocks until that finishes. Args: writer: The to flush. If None, the current default writer will be used instead; if there is no current writer, this returns . name: Ignored legacy argument for a name for the operation. 
Returns: The created .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:flush arg:writer arg:name arguments arg arg If Compare Assign If Compare Return return:yes Call If Call Return return:yes Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "get_debug_quantized_model", + "source_code": "def get_debug_quantized_model(self) -> bytes:\n return self._get_quantized_model(is_debug=True)", + "docstring": "Returns an instrumented quantized model. Convert the quantized model with the initialized converter and return bytes for model. The model will be instrumented with numeric verification operations and should only be used for debugging. Returns: Model bytes corresponding to the model. Raises: ValueError: if converter is not passed to the debugger.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py", + "ast_data": "FunctionDef name:get_debug_quantized_model arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "PerRow", + "source_code": "class PerRow(Granularity):\n pass", + "docstring": "Represents row-wise granularity in quantization. This is a special case of per-axis quantization and is unique to Float8 matmuls where the input is quantized with a block_size of (1, ..., input.shape[-1]). And the weight is quantized with a block_size of (1, weight.shape[1]).", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", + "ast_data": "ClassDef name:PerRow" + }, + { + "library": "pytorch", + "name": "AnonymousAxis", + "source_code": "class AnonymousAxis:\n\n def __init__(self, value: str) -> None:\n self.value = int(value)\n if self.value < 1:\n raise ValueError(f'Anonymous axis should have positive length, not {self.value}')\n\n def __repr__(self) -> str:\n return f'{self.value}-axis'", + "docstring": "Used by to represent an axis with a size (> 1), but no associated identifier. 
Note: Different instances of this class are not equal to each other, even if they have the same value.", + "type": "class", + "file_path": "pytorch\\functorch\\einops\\_parsing.py", + "ast_data": "ClassDef name:AnonymousAxis FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign Call If Compare Raise Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "seaborn", + "name": "update_units", + "source_code": "def update_units(self, x):\n self.converter = mpl.units.registry.get_converter(x)\n if self.converter is not None:\n self.converter.default_units(x, self)\n info = self.converter.axisinfo(self.units, self)\n if info is None:\n return\n if info.majloc is not None:\n self.set_major_locator(info.majloc)\n if info.majfmt is not None:\n self.set_major_formatter(info.majfmt)", + "docstring": "Pass units to the internal converter, potentially updating its mapping.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\scales.py", + "ast_data": "FunctionDef name:update_units arg:self arg:x arguments arg arg Assign Call If Compare Call Assign Call If Compare Return return:no If Compare Call If Compare Call" + }, + { + "library": "pytorch", + "name": "LowerCholeskyTransform", + "source_code": "class LowerCholeskyTransform(Transform):\n domain = constraints.independent(constraints.real, 2)\n codomain = constraints.lower_cholesky\n\n def __eq__(self, other):\n return isinstance(other, LowerCholeskyTransform)\n\n def _call(self, x):\n return x.tril(-1) + x.diagonal(dim1=-2, dim2=-1).exp().diag_embed()\n\n def _inverse(self, y):\n return y.tril(-1) + y.diagonal(dim1=-2, dim2=-1).log().diag_embed()", + "docstring": "Transform from unconstrained matrices to lower-triangular matrices with nonnegative diagonal entries. This is useful for parameterizing positive definite matrices in terms of their Cholesky factorization.", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\transforms.py", + "ast_data": "ClassDef name:LowerCholeskyTransform Assign Call Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call Call Call Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Return return:yes Call Call Call Call" + }, + { + "library": "cherrypy", + "name": "callable", + "source_code": "def callable(self, *args, **kwargs):\n innerfunc = cherrypy.serving.request.handler\n\n def wrap(*args, **kwargs):\n return self.newhandler(innerfunc, *args, **kwargs)\n cherrypy.serving.request.handler = wrap", + "docstring": "Decorate a request handler with a handler tool callable.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptools.py", + "ast_data": "FunctionDef name:callable arg:self arguments arg arg arg Assign FunctionDef name:wrap arguments arg arg Return return:yes Call Assign" + }, + { + "library": "tensorflow", + "name": "simple_save", + "source_code": "@tf_export(v1=['saved_model.simple_save'])\n@deprecation.deprecated(None, 'This API was designed for TensorFlow v1. 
See https://www.tensorflow.org/guide/migrate for instructions on how to migrate your code to TensorFlow v2.')\ndef simple_save(session, export_dir, inputs, outputs, legacy_init_op=None):\n signature_def_map = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_def_utils.predict_signature_def(inputs, outputs)}\n b = builder.SavedModelBuilder(export_dir)\n b.add_meta_graph_and_variables(session, tags=[tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS), main_op=legacy_init_op, clear_devices=True)\n b.save()", + "docstring": "Convenience function to build a SavedModel suitable for serving. In many common cases, saving models for serving will be as simple as: simple_save(session, export_dir, inputs={\"x\": x, \"y\": y}, outputs={\"z\": z}) Although in many cases it's not necessary to understand all of the many ways to configure a SavedModel, this method has a few practical implications: - It will be treated as a graph for inference / serving (i.e. uses the tag ) - The SavedModel will load in TensorFlow Serving and supports the [Predict API]( To use the Classify, Regress, or MultiInference APIs, please see the [SavedModel APIs]( - Some TensorFlow ops depend on information on disk or other information called \"assets\". These are generally handled automatically by adding the assets to the collection. Only assets in that collection are exported; if you need more custom behavior, you'll need to use the [SavedModelBuilder]( More information about SavedModel and signatures can be found here: Args: session: The TensorFlow session from which to save the meta graph and variables. export_dir: The path to which the SavedModel will be stored. inputs: dict mapping string input names to tensors. These are added to the SignatureDef as the inputs. outputs: dict mapping string output names to tensors. These are added to the SignatureDef as the outputs. legacy_init_op: Legacy support for op or group of ops to execute after the restore op upon a load.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\simple_save.py", + "ast_data": "FunctionDef name:simple_save arg:session arg:export_dir arg:inputs arg:outputs arg:legacy_init_op arguments arg arg arg arg arg Assign Call Assign Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_is_type_subset", + "source_code": "def _is_type_subset(a, b):\n if isinstance(a, type_spec.TypeSpec):\n return a.most_specific_compatible_type(b) == a\n return True", + "docstring": "Returns true if is a subset of type (or if a is not a TypeSpec.)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:_is_type_subset arg:a arg:b arguments arg arg If Call Return return:yes Compare Call Return return:yes" + }, + { + "library": "kornia", + "name": "vec_like", + "source_code": "def vec_like(n: int, tensor: Tensor, shared_memory: bool=False) -> Tensor:\n if n <= 0:\n raise AssertionError(type(n), n)\n if len(tensor.shape) < 1:\n raise AssertionError(tensor.shape)\n vec = zeros(n, 1, device=tensor.device, dtype=tensor.dtype)\n return vec[None].expand(tensor.shape[0], n, 1) if shared_memory else vec[None].repeat(tensor.shape[0], 1, 1)", + "docstring": "Return a 2-D tensor with a vector containing zeros with the same batch size as the input. Args: n: the number of rows :math:. 
tensor: image tensor that will determine the batch size of the output matrix. The expected shape is :math:. shared_memory: when set, all samples in the batch will share the same memory. Returns: The vector with the same batch size as the input :math:. Notes: When the dimension to expand is of size 1, using torch.expand(...) yields the same tensor as torch.repeat(...) without using extra memory. Thus, when the tensor obtained by this method will be later assigned - use this method with shared_memory=False, otherwise, prefer using it with shared_memory=True.", + "type": "function", + "file_path": "kornia\\kornia\\utils\\misc.py", + "ast_data": "FunctionDef name:vec_like arg:n arg:tensor arg:shared_memory arguments arg arg arg If Compare Raise Call Call If Compare Call Raise Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_assert_concat_compatible_structured_tensors", + "source_code": "def _assert_concat_compatible_structured_tensors(values):\n if not isinstance(values, Sequence):\n raise ValueError('values must be a list of StructuredTensors (not a list)')\n if not values:\n raise ValueError('values must not be an empty list')\n for st in values:\n if not isinstance(st, StructuredTensor):\n raise ValueError('values must be a list of StructuredTensors')\n _assert_all_paths_match(values)\n _assert_all_ranks_match(values)", + "docstring": "Sometimes raises an error if concat doesn't make sense statically on values. values must be a sequence, and each element in values must be a structured tensor, and must have the same paths. Additionally, each path that is a submessage must have the same rank. These constraints are sufficient for concat on the fields to be the same as concat on structured tensors. This is meant to capture scenarios like paths that are not in the first structured tensor, but are in later structured tensors, which will just be ignored by the recursive algorithm. If the rank of a submessage was different for two structured tensors, then that is also a non-sensical merge. Note that all of these checks are static, as paths and submessage ranks are known. Args: values: a Sequence of StructuredTensors. Raises: ValueError: if there is any inconsistency as described above.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:_assert_concat_compatible_structured_tensors arg:values arguments arg If Call Raise Call If Raise Call For If Call Raise Call Call Call" + }, + { + "library": "tensorflow", + "name": "_set_checkpoint_initializer", + "source_code": "def _set_checkpoint_initializer(variable, ckpt_file, tensor_name, slice_spec, name='checkpoint_initializer'):\n base_type = variable.dtype.base_dtype\n with ops.device(variable.device), ops.device('/cpu:0'):\n restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]\n names_to_saveables = saveable_object_util.op_list_to_dict([variable])\n saveable_objects = []\n for name, op in names_to_saveables.items():\n for s in saveable_object_util.saveable_objects_for_op(op, name):\n saveable_objects.append(s)\n assert len(saveable_objects) == 1\n init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)\n variable._initializer_op = init_op\n restore_op.set_shape(variable.shape)\n variable._initial_value = restore_op", + "docstring": "Overrides given variable's initialization op. 
Sets variable initializer to assign op that initializes variable from tensor's value in the checkpoint. Args: variable: object. ckpt_file: string, full path of the checkpoint. tensor_name: Name of the tensor to load from the checkpoint. slice_spec: Slice specification for loading partitioned tensors. name: Name of the operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py", + "ast_data": "FunctionDef name:_set_checkpoint_initializer arg:variable arg:ckpt_file arg:tensor_name arg:slice_spec arg:name arguments arg arg arg arg arg Assign With Call Call Assign Call Assign Call Assign For Call For Call Call Compare Call Assign Call Assign Call Assign" + }, + { + "library": "scipy", + "name": "lagrange", + "source_code": "def lagrange(x, w):\n M = len(x)\n p = poly1d(0.0)\n for j in range(M):\n pt = poly1d(w[j])\n for k in range(M):\n if k == j:\n continue\n fac = x[j] - x[k]\n pt *= poly1d([1.0, -x[k]]) / fac\n p += pt\n return p", + "docstring": "Return a Lagrange interpolating polynomial. Given two 1-D arrays and returns the Lagrange interpolating polynomial through the points `xwxnumpy.poly1dscipy.interpolate.BarycentricInterpolatorf(x) = x^3` by 3 points. >>> import numpy as np >>> from scipy.interpolate import lagrange >>> x = np.array([0, 1, 2]) >>> y = x**3 >>> poly = lagrange(x, y) Since there are only 3 points, the Lagrange polynomial has degree 2. Explicitly, it is given by .. math:: \\begin{aligned} L(x) &= 1\\times \\frac{x (x - 2)}{-1} + 8\\times \\frac{x (x-1)}{2} \\\\ &= x (-2 + 3x) \\end{aligned} >>> from numpy.polynomial.polynomial import Polynomial >>> Polynomial(poly.coef[::-1]).coef array([ 0., -2., 3.]) >>> import matplotlib.pyplot as plt >>> x_new = np.arange(0, 2.1, 0.1) >>> plt.scatter(x, y, label='data') >>> plt.plot(x_new, Polynomial(poly.coef[::-1])(x_new), label='Polynomial') >>> plt.plot(x_new, 3*x_new**2 - 2*x_new + 0*x_new, ... label=r\"$3 x^2 - 2 x$\", linestyle='-.') >>> plt.legend() >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", + "ast_data": "FunctionDef name:lagrange arg:x arg:w arguments arg arg Assign Call Assign Call For Call Assign Call For Call If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "can_handle", + "source_code": "@staticmethod\ndef can_handle(x, y=None):\n raise NotImplementedError", + "docstring": "Whether the current DataAdapter could handle the input x and y. Structure wise, x and y can be a single object, a list of objects if there are multiple inputs/outputs, or a dictionary of objects when the inputs/outputs are named. Args: x: input features. y: target labels. Note that y could be None in the case of prediction. 
Returns: boolean", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:can_handle arg:x arg:y arguments arg arg Raise" + }, + { + "library": "scikit-learn", + "name": "iteration_ends", + "source_code": "def iteration_ends(self, time_step):\n pass", + "docstring": "Perform update to learning rate and potentially other states at the end of an iteration", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py", + "ast_data": "FunctionDef name:iteration_ends arg:self arg:time_step arguments arg arg" + }, + { + "library": "scikit-learn", + "name": "ndindex", + "source_code": "def ndindex(*x: int) -> Generator[tuple[int, ...]]:\n if not x:\n yield ()\n return\n for i in ndindex(*x[:-1]):\n for j in range(x[-1]):\n yield (*i, j)", + "docstring": "Generate all N-dimensional indices for a given array shape. Given the shape of an array, an ndindex instance iterates over the N-dimensional index of the array. At each iteration a tuple of indices is returned, the last dimension is iterated over first. This has an identical API to numpy.ndindex. Parameters ---------- *x : int The shape of the array.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_utils\\_helpers.py", + "ast_data": "FunctionDef name:ndindex arguments arg If Return return:no For Call For Call" + }, + { + "library": "django", + "name": "i18n_javascript", + "source_code": "def i18n_javascript(self, request, extra_context=None):\n return JavaScriptCatalog.as_view(packages=['django.contrib.admin'])(request)", + "docstring": "Display the i18n JavaScript that the Django admin requires. is unused but present for consistency with the other admin views.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\sites.py", + "ast_data": "FunctionDef name:i18n_javascript arg:self arg:request arg:extra_context arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "select_columns_by_name", + "source_code": "@abstractmethod\ndef select_columns_by_name(self, names: Sequence[str]) -> DataFrame:\n pass", + "docstring": "Create a new DataFrame by selecting a subset of columns by name.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py", + "ast_data": "FunctionDef name:select_columns_by_name arg:self arg:names arguments arg arg" + }, + { + "library": "pandas", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n if self._info_repr():\n buf = StringIO()\n self.info(buf=buf)\n return buf.getvalue()\n repr_params = fmt.get_dataframe_repr_params()\n return self.to_string(**repr_params)", + "docstring": "Return a string representation for a particular DataFrame.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg If Call Assign Call Call Return return:yes Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "__arrow_array__", + "source_code": "def __arrow_array__(self, type=None):\n import pyarrow\n from pandas.core.arrays.arrow.extension_types import ArrowPeriodType\n if type is not None:\n if pyarrow.types.is_integer(type):\n return pyarrow.array(self._ndarray, mask=self.isna(), type=type)\n elif isinstance(type, ArrowPeriodType):\n if self.freqstr != type.freq:\n raise TypeError(f\"Not supported to convert PeriodArray to array with different 'freq' ({self.freqstr} vs 
{type.freq})\")\n else:\n raise TypeError(f\"Not supported to convert PeriodArray to '{type}' type\")\n period_type = ArrowPeriodType(self.freqstr)\n storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type='int64')\n return pyarrow.ExtensionArray.from_storage(period_type, storage_array)", + "docstring": "Convert myself into a pyarrow Array.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\period.py", + "ast_data": "FunctionDef name:__arrow_array__ arg:self arg:type arguments arg arg If Compare If Call Return return:yes Call Call If Call If Compare Raise Call Raise Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "logpdf", + "source_code": "def logpdf(self, x, s2, mu=0, lmbda=1, a=1, b=1):\n invalid, args = self._process_parameters_pdf(x, s2, mu, lmbda, a, b)\n s2 = args[1]\n with np.errstate(all='ignore'):\n logpdf = np.asarray(self._logpdf(*args))\n logpdf[s2 <= 0] = -np.inf\n logpdf[invalid] = np.nan\n return logpdf[()]", + "docstring": "Log of the probability density function. Parameters ---------- x, s2 : array_like Arguments. must be greater than zero. mu, lmbda, a, b : array_like, optional Shape parameters. , , and must be greater than zero. Returns ------- logpdf : ndarray or scalar Log of the probability density function.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:logpdf arg:self arg:x arg:s2 arg:mu arg:lmbda arg:a arg:b arguments arg arg arg arg arg arg arg Assign Call Assign With Call Assign Call Call Assign Compare Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "status", + "source_code": "@property\ndef status(self):\n _, status = self.args[:2]\n return status", + "docstring": "The integer HTTP status code to emit.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cperror.py", + "ast_data": "FunctionDef name:status arg:self arguments arg Assign Return return:yes" + }, + { + "library": "seaborn", + "name": "numeric_mapping", + "source_code": "def numeric_mapping(self, data, palette, norm):\n if isinstance(palette, dict):\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n else:\n levels = list(np.sort(remove_na(data.unique())))\n palette = 'ch:' if palette is None else palette\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = '``hue_norm`` must be None, tuple, or Normalize object.'\n raise ValueError(err)\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n return (levels, lookup_table, norm, cmap)", + "docstring": "Determine colors when the hue variable is quantitative.", + "type": "method", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "FunctionDef name:numeric_mapping arg:self arg:data arg:palette arg:norm arguments arg arg arg arg If Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Compare If Call Assign Assign Call If Compare Assign Call If Call Assign Call If Call Assign Raise Call If Call Call Call Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "virtualenv", + "name": "add_parser_arguments", + "source_code": 
"@classmethod\ndef add_parser_arguments(cls, parser, interpreter, meta, app_data):\n parser.add_argument('dest', help='directory to create virtualenv at', type=cls.validate_dest)\n parser.add_argument('--clear', dest='clear', action='store_true', help='remove the destination directory if exist before starting (will overwrite files otherwise)', default=False)\n parser.add_argument('--no-vcs-ignore', dest='no_vcs_ignore', action='store_true', help=\"don't create VCS ignore directive in the destination directory\", default=False)", + "docstring": "Add CLI arguments for the creator. :param parser: the CLI parser :param app_data: the application data folder :param interpreter: the interpreter we're asked to create virtual environment for :param meta: value as returned by :meth:", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\create\\creator.py", + "ast_data": "FunctionDef name:add_parser_arguments arg:cls arg:parser arg:interpreter arg:meta arg:app_data arguments arg arg arg arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "record_if", + "source_code": "@tf_export('summary.record_if', v1=[])\n@tf_contextlib.contextmanager\ndef record_if(condition):\n old = _summary_state.is_recording\n try:\n _summary_state.is_recording = condition\n yield\n finally:\n _summary_state.is_recording = old", + "docstring": "Sets summary recording on or off per the provided boolean value. The provided value can be a python boolean, a scalar boolean Tensor, or or a callable providing such a value; if a callable is passed it will be invoked on-demand to determine whether summary writing will occur. Note that when calling record_if() in an eager mode context, if you intend to provide a varying condition like , you must wrap this in a callable to avoid immediate eager evaluation of the condition. In particular, using a callable is the only way to have your condition evaluated as part of the traced body of an @tf.function that is invoked from within the context. Args: condition: can be True, False, a bool Tensor, or a callable providing such. Yields: Returns a context manager that sets this value on enter and restores the previous value on exit.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:record_if arg:condition arguments arg Assign Try Assign Assign Call" + }, + { + "library": "sphinx", + "name": "I18nTags", + "source_code": "class I18nTags(Tags):\n\n def eval_condition(self, condition: Any) -> bool:\n return True", + "docstring": "Dummy tags module for I18nBuilder. 
To ensure that all text inside `` regardless the defined tags.", + "type": "class", + "file_path": "sphinx\\sphinx\\builders\\gettext.py", + "ast_data": "ClassDef name:I18nTags FunctionDef name:eval_condition arg:self arg:condition arguments arg arg Return return:yes" + }, + { + "library": "scrapy", + "name": "requestTunnel", + "source_code": "def requestTunnel(self, protocol: Protocol) -> Protocol:\n assert protocol.transport\n tunnelReq = tunnel_request_data(self._tunneledHost, self._tunneledPort, self._proxyAuthHeader)\n protocol.transport.write(tunnelReq)\n self._protocolDataReceived = protocol.dataReceived\n protocol.dataReceived = self.processProxyResponse\n self._protocol = protocol\n return protocol", + "docstring": "Asks the proxy to open a tunnel.", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http11.py", + "ast_data": "FunctionDef name:requestTunnel arg:self arg:protocol arguments arg arg Assign Call Call Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "major", + "source_code": "@property\ndef major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0", + "docstring": "The first item of :attr: or `` if unavailable. >>> Version(\"1.2.3\").major 1", + "type": "method", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "FunctionDef name:major arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "scikit-learn", + "name": "diag", + "source_code": "def diag(self, X):\n return np.vstack([kernel.diag(X) for kernel in self.kernels]).T", + "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to ; however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X, n_kernels) Diagonal of kernel k(X, X)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "_scalar_type_key", + "source_code": "def _scalar_type_key(typ):\n dt = dtype(typ)\n return (dt.kind.lower(), dt.itemsize)", + "docstring": "A `sorted`.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\numerictypes.py", + "ast_data": "FunctionDef name:_scalar_type_key arg:typ arguments arg Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "MatWriteError", + "source_code": "class MatWriteError(Exception):\n pass", + "docstring": "Exception indicating a write issue.", + "type": "class", + "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py", + "ast_data": "ClassDef name:MatWriteError" + }, + { + "library": "tensorflow", + "name": "_merge_partition_lists", + "source_code": "def _merge_partition_lists(partition_lists):\n dst = list(partition_lists[0])\n for src in partition_lists[1:]:\n if len(src) != len(dst):\n raise ValueError('All ragged inputs must have the same ragged_rank.')\n for i in range(len(dst)):\n dst[i] = dst[i]._merge_precomputed_encodings(src[i])\n return dst", + "docstring": "Merges the given list of lists of RowPartitions. Args: partition_lists: A list of lists of RowPartition. 
Returns: A list of RowPartitions, where is formed by merging for all , using .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_functional_ops.py", + "ast_data": "FunctionDef name:_merge_partition_lists arg:partition_lists arguments arg Assign Call For If Compare Call Call Raise Call For Call Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "_fill_limit_area_2d", + "source_code": "def _fill_limit_area_2d(mask: npt.NDArray[np.bool_], limit_area: Literal['outside', 'inside']) -> None:\n neg_mask = ~mask.T\n if limit_area == 'outside':\n la_mask = np.maximum.accumulate(neg_mask, axis=0) & np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1]\n else:\n la_mask = ~np.maximum.accumulate(neg_mask, axis=0) | ~np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1]\n mask[la_mask.T] = False", + "docstring": "Prepare 2d mask for ffill/bfill with limit_area. When called, mask will no longer faithfully represent when the corresponding are NA or not. Parameters ---------- mask : np.ndarray[bool, ndim=1] Mask representing NA values when filling. limit_area : { \"outside\", \"inside\" } Whether to limit filling to outside or inside the outer most non-NA value.", + "type": "function", + "file_path": "pandas\\pandas\\core\\missing.py", + "ast_data": "FunctionDef name:_fill_limit_area_2d arg:mask arg:limit_area arguments arg arg Assign If Compare Assign Call Call Assign Call Call Assign" + }, + { + "library": "scikit-learn", + "name": "_decision_function", + "source_code": "def _decision_function(self, X):\n check_is_fitted(self)\n if sparse.issparse(X):\n return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_\n else:\n return super()._decision_function(X)", + "docstring": "Decision function of the linear model. 
Parameters ---------- X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) Returns ------- T : ndarray of shape (n_samples,) The predicted decision function.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py", + "ast_data": "FunctionDef name:_decision_function arg:self arg:X arguments arg arg Call If Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_gid", + "source_code": "def get_gid(self):\n return self._gid", + "docstring": "Return the group id.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:get_gid arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "_values", + "source_code": "@property\ndef _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray:\n mgr = self._mgr\n blocks = mgr.blocks\n if len(blocks) != 1:\n return ensure_wrapped_if_datetimelike(self.values)\n arr = blocks[0].values\n if arr.ndim == 1:\n return self.values\n arr = cast('np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray', arr)\n return arr.T", + "docstring": "Analogue to ._values that may return a 2D ExtensionArray.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_values arg:self arguments arg Assign Assign If Compare Call Return return:yes Call Assign If Compare Return return:yes Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "datalim_to_dt", + "source_code": "def datalim_to_dt(self):\n dmin, dmax = self.axis.get_data_interval()\n if dmin > dmax:\n dmin, dmax = (dmax, dmin)\n return (num2date(dmin, self.tz), num2date(dmax, self.tz))", + "docstring": "Convert axis data interval to datetime objects.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\dates.py", + "ast_data": "FunctionDef name:datalim_to_dt arg:self arguments arg Assign Call If Compare Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "ListGetItemSource", + "source_code": "@dataclasses.dataclass(frozen=True)\nclass ListGetItemSource(GetItemSource):\n\n def reconstruct(self, codegen: 'PyCodegen'):\n codegen.add_push_null(lambda: codegen.load_import_from(utils.__name__, 'list_getitem'))\n codegen(self.base)\n if self.index_is_slice:\n raise RuntimeError('List[slice] is a temporary object and should not have a source')\n else:\n codegen.append_output(codegen.create_load_const(self.index))\n codegen.extend_output(create_call_function(2, False))\n\n def name(self):\n assert not isinstance(self.index, Source)\n if self.index_is_slice:\n raise RuntimeError('List[slice] is a temporary object and should not have a source')\n else:\n return f'list.__getitem__({self.base.name()}, {self.index!r})'", + "docstring": "Same as GetItemSource with reconstruct and name overridden to be list specific.", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\source.py", + "ast_data": "ClassDef name:ListGetItemSource FunctionDef name:reconstruct arg:self arg:codegen arguments arg arg Call arguments Call Call If Raise Call Call Call Call Call FunctionDef name:name arg:self arguments arg Call If Raise Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "encode_varint", + "source_code": "def encode_varint(n: int) -> list[int]:\n assert n >= 0\n b = [n & 63]\n n >>= 6\n while n > 0:\n b[-1] |= 64\n b.append(n & 63)\n n >>= 6\n return b", + "docstring": "6-bit chunk encoding of 
an unsigned integer See", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:encode_varint arg:n arguments arg Compare Assign While Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_XLog1pyGrad", + "source_code": "@ops.RegisterGradient('Xlog1py')\ndef _XLog1pyGrad(op: ops.Operation, grad):\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n with ops.control_dependencies([grad]):\n not_zero_x = math_ops.cast(math_ops.not_equal(x, math_ops.cast(0.0, dtype=x.dtype)), dtype=x.dtype)\n partial_x = gen_math_ops.xlog1py(not_zero_x, y)\n partial_y = gen_math_ops.xdivy(x, y + 1.0)\n return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx), array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))", + "docstring": "Returns gradient of xlog1py(x, y) with respect to x and y.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_XLog1pyGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Call Call Assign Call Assign Call Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_full_name", + "source_code": "def _full_name(self):\n return 'projects/%s/locations/%s/nodes/%s' % (self._project, self._zone, self._tpu)", + "docstring": "Returns the full Cloud name for this TPU.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py", + "ast_data": "FunctionDef name:_full_name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "DistributedIteratorInterface", + "source_code": "@tf_export('distribute.DistributedIterator', v1=[])\nclass DistributedIteratorInterface(Iterator):\n\n def get_next(self):\n raise NotImplementedError('DistributedIterator.get_next() must be implemented in descendants.')\n\n @property\n def element_spec(self):\n raise NotImplementedError('DistributedIterator.element_spec() must be implemented in descendants')\n\n def get_next_as_optional(self):\n raise NotImplementedError('get_next_as_optional() not implemented in descendants')", + "docstring": "An iterator over . is the primary mechanism for enumerating elements of a . It supports the Python Iterator protocol, which means it can be iterated over using a for-loop or by fetching individual elements explicitly via . You can create a by calling on a or creating a python loop over a . Visit the [tutorial]( on distributed input for more examples and caveats.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py", + "ast_data": "ClassDef name:DistributedIteratorInterface FunctionDef name:get_next arg:self arguments arg Raise Call FunctionDef name:element_spec arg:self arguments arg Raise Call FunctionDef name:get_next_as_optional arg:self arguments arg Raise Call Call" + }, + { + "library": "scipy", + "name": "whiten", + "source_code": "@xp_capabilities()\ndef whiten(obs, check_finite=None):\n xp = array_namespace(obs)\n if check_finite is None:\n check_finite = not is_lazy_array(obs)\n obs = _asarray(obs, check_finite=check_finite, xp=xp)\n std_dev = xp.std(obs, axis=0)\n zero_std_mask = std_dev == 0\n std_dev = xpx.at(std_dev, zero_std_mask).set(1.0)\n if check_finite and xp.any(zero_std_mask):\n warnings.warn('Some columns have standard deviation zero. 
The values of these columns will not change.', RuntimeWarning, stacklevel=2)\n return obs / std_dev", + "docstring": "Normalize a group of observations on a per feature basis. Before running k-means, it is beneficial to rescale each feature dimension of the observation set by its standard deviation (i.e. \"whiten\" it - as in \"white noise\" where each frequency has equal power). Each feature is divided by its standard deviation across all observations to give it unit variance. Parameters ---------- obs : ndarray Each row of the array is an observation. The columns are the features seen during each observation:: # f0 f1 f2 obs = [[ 1., 1., 1.], #o0 [ 2., 2., 2.], #o1 [ 3., 3., 3.], #o2 [ 4., 4., 4.]] #o3 check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True for eager backends and False for lazy ones. Returns ------- result : ndarray Contains the values in scaled by the standard deviation of each column. Examples -------- >>> import numpy as np >>> from scipy.cluster.vq import whiten >>> features = np.array([[1.9, 2.3, 1.7], ... [1.5, 2.5, 2.2], ... [0.8, 0.6, 1.7,]]) >>> whiten(features) array([[ 4.17944278, 2.69811351, 7.21248917], [ 3.29956009, 2.93273208, 9.33380951], [ 1.75976538, 0.7038557 , 7.21248917]])", + "type": "function", + "file_path": "scipy\\scipy\\cluster\\vq.py", + "ast_data": "FunctionDef name:whiten arg:obs arg:check_finite arguments arg arg Assign Call If Compare Assign Call Assign Call Assign Call Assign Compare Assign Call Call If BoolOp Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "compute_gradient_error", + "source_code": "@tf_export(v1=['test.compute_gradient_error'])\n@deprecation.deprecated(date=None, instructions='Use tf.test.compute_gradient in 2.0, which has better support for functions. Note that the two versions have different usage, so code change is needed.')\ndef compute_gradient_error(x, x_shape, y, y_shape, x_init_value=None, delta=0.001, init_targets=None, extra_feed_dict=None):\n grad = compute_gradient(x, x_shape, y, y_shape, x_init_value, delta, init_targets, extra_feed_dict=extra_feed_dict)\n return _compute_error(grad)", + "docstring": "Computes the gradient error. Computes the maximum error for dy/dx between the computed Jacobian and the numerically estimated Jacobian. This function will modify the tensors passed in as it adds more operations and hence changing the consumers of the operations of the input tensors. This function adds operations to the current session. To compute the error using a particular device, such as a GPU, use the standard methods for setting a device (e.g. using with sess.graph.device() or setting a device function in the session constructor). Args: x: a tensor or list of tensors x_shape: the dimensions of x as a tuple or an array of ints. If x is a list, then this is the list of shapes. y: a tensor y_shape: the dimensions of y as a tuple or an array of ints. x_init_value: (optional) a numpy array of the same shape as \"x\" representing the initial value of x. If x is a list, this should be a list of numpy arrays. If this is none, the function will pick a random tensor as the initial value. delta: (optional) the amount of perturbation. init_targets: list of targets to run to initialize model params. 
extra_feed_dict: dict that allows fixing specified tensor values during the Jacobian calculation. Returns: The maximum error in between the two Jacobians.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker.py", + "ast_data": "FunctionDef name:compute_gradient_error arg:x arg:x_shape arg:y arg:y_shape arg:x_init_value arg:delta arg:init_targets arg:extra_feed_dict arguments arg arg arg arg arg arg arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "get_default_dynamic_sparse_quant_module_mappings", + "source_code": "def get_default_dynamic_sparse_quant_module_mappings() -> dict[Callable, Any]:\n return DEFAULT_DYNAMIC_SPARSE_QUANT_MODULE_MAPPINGS", + "docstring": "Get module mapping for post training dynamic sparse quantization", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py", + "ast_data": "FunctionDef name:get_default_dynamic_sparse_quant_module_mappings arguments Return return:yes" + }, + { + "library": "matplotlib", + "name": "_min_in_bounds", + "source_code": "def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)", + "docstring": "Ensure the new min value is between valmin and self.val[1].", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:_min_in_bounds arg:self arg:min arguments arg arg If Compare If Return return:yes Assign If Compare Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_partition_value", + "source_code": "def _partition_value(self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int) -> torch.Tensor:\n if self.reduce_op in ('max', 'min'):\n return tensor\n elif self.reduce_op == 'sum':\n if self.norm_type == 0:\n raise NotImplementedError(f'Unsupported norm type:: {self.norm_type}')\n elif self.norm_type == 1:\n return tensor / mesh.size(mesh_dim)\n assert isinstance(self.norm_type, (int, float))\n return tensor / math.pow(mesh.size(mesh_dim), 1 / self.norm_type)\n raise NotImplementedError(self.reduce_op)", + "docstring": "For example, consider 4 ranks, a (3,) replicated tensor, and 2-norm: Ranks 0 and 1: sqrt(t1^2 + t2^2 + t3^3) To convert from replicated to partial, we want f(x) such that sqrt(t1^2 + t2^2 + t3^3) = sqrt(4f(t1)^2 + 4f(t2)^2 + 4f(t3)^2) = sqrt(4) sqrt(f(t1)^2 + f(t2)^2 + f(t3)^2). One such f(x) is f(x) = x / sqrt(4). 
This generalizes to d ranks and p-norm as f(x) = x / d^(1/p).", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_math_ops.py", + "ast_data": "FunctionDef name:_partition_value arg:self arg:tensor arg:mesh arg:mesh_dim arguments arg arg arg arg If Compare Return return:yes If Compare If Compare Raise Call If Compare Return return:yes Call Call Return return:yes Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_print_tensor", + "source_code": "def _print_tensor(tensor_name, num_elements, tensor, output_tensor):\n if self._parameters.is_brief_mode():\n if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:\n raise ValueError('Tensor %s with name %s is not in the tensorname_to_cache_idx' % (tensor, tensor_name))\n msg = '%d' % tensor_trace_order.tensorname_to_cache_idx[tensor_name]\n else:\n msg = '\"%s\"' % tensor_name\n if self._parameters.trace_dir:\n output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME + self._get_outfile_suffix())\n output_stream = _OUTPUT_STREAM_ESCAPE + output_path\n else:\n output_stream = sys.stderr\n return logging_ops.print_v2(msg, array_ops.shape(output_tensor), '@', self._replica_id, '\\n', output_tensor, '\\n', summarize=num_elements, output_stream=output_stream)", + "docstring": "Prints a tensor value to a file. Args: tensor_name: name of the tensor being traced. num_elements: number of elements to print (-1 means print all). tensor: the tensor needs to be returned. output_tensor: the tensor needs to be printed. Returns: The same tensor passed via the \"tensor\" argument. Raises: ValueError: If tensor_name is not already in tensor_trace_order.tensorname_to_cache_idx.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_print_tensor arg:tensor_name arg:num_elements arg:tensor arg:output_tensor arguments arg arg arg arg If Call If Compare Raise Call Assign Assign If Assign Call Call Assign Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_write_op_list_section", + "source_code": "def _write_op_list_section(self, graph_order):\n self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_OP_LIST))\n self._write_report('%s %d\\n' % (_FIELD_NAME_NUM_OPS, len(graph_order.operations)))\n for i in range(0, len(graph_order.operations)):\n op = graph_order.operations[i]\n line = '%d \"%s\" %s' % (i, op.name, op.type)\n for out_tensor in op.outputs:\n if out_tensor.name not in graph_order.tensor_to_idx:\n raise ValueError('out_tensor is not in tensor_to_idx. out_tensor={}, tensor_to_idx={}'.format(out_tensor.name, graph_order.tensor_to_idx))\n line += ' %d' % graph_order.tensor_to_idx[out_tensor.name]\n line += '\\n'\n self._write_report(line)\n self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_OP_LIST))", + "docstring": "Writes the Op-list section of the report.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py", + "ast_data": "FunctionDef name:_write_op_list_section arg:self arg:graph_order arguments arg arg Call Call Call For Call Call Assign Assign For If Compare Raise Call Call Call Call" + }, + { + "library": "pandas", + "name": "_prettify_tree", + "source_code": "def _prettify_tree(self) -> bytes:\n from xml.dom.minidom import parseString\n dom = parseString(self.out_xml)\n return dom.toprettyxml(indent=' ', encoding=self.encoding)", + "docstring": "Output tree for pretty print format. 
This method will pretty print xml with line breaks and indentation.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\xml.py", + "ast_data": "FunctionDef name:_prettify_tree arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "parse_raster", + "source_code": "def parse_raster(self, value):\n return from_pgraster(value)", + "docstring": "Convert a PostGIS HEX String into a dict readable by GDALRaster.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py", + "ast_data": "FunctionDef name:parse_raster arg:self arg:value arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "__next__", + "source_code": "def __next__(self):\n raise NotImplementedError('subclasses of Deserializer must provide a __next__() method')", + "docstring": "Iteration interface -- return the next item in the stream", + "type": "method", + "file_path": "django\\django\\core\\serializers\\base.py", + "ast_data": "FunctionDef name:__next__ arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_trackable_custom_creator", + "source_code": "def _trackable_custom_creator(next_creator, name, initial_value, trackable_parent=None, **kwargs):\n\n def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):\n inner_kwargs.pop('name')\n return next_creator(initial_value=initializer, name=name, **inner_kwargs)\n if name is not None and name.startswith(name_prefix):\n scope_stripped_name = name[len(name_prefix) + 1:]\n if not trackable_parent:\n return template._add_variable_with_custom_getter(initializer=initial_value, name=scope_stripped_name, getter=_call_next_creator_renaming_initializer, overwrite=True, trackable_parent=(template, name_prefix), **kwargs)\n else:\n parent_object, parent_name_prefix = trackable_parent\n template._track_trackable(parent_object, name=parent_name_prefix[len(name_prefix) + 1:], overwrite=True)\n return next_creator(name=name, initial_value=initial_value, trackable_parent=(template, name_prefix), **kwargs)", + "docstring": "A variable creation hook which adds Trackable dependencies. Set for example during a 's first wrapped function execution. Ensures that (a) depends on any trackable objects using their own scope inside this scope which create variables, and (b) that any variables not in a more deeply nested scope are added as dependencies directly. The argument is passed between custom creators but ignored when the variable object itself is created. This argument indicates (if not ) that a more deeply nested scope has already added the variable as a dependency, and that parent scopes should add a dependency on that object rather than on the variable directly. Args: next_creator: See ; the next creator in the chain. name: The (full, scope-influenced) name of the variable. The itself is stripped for the purposes of object-based dependency tracking, but scopes opened within this scope are respected. initial_value: See . Taken explicitly so the argument can be re-named and used with . trackable_parent: If not None, a more deeply nested trackable object and its name prefix which were passed to to add a dependency on (rather than depending on the variable directly). **kwargs: Passed through to the next creator. 
Returns: The output of : the fetched/created variable object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:_trackable_custom_creator arg:next_creator arg:name arg:initial_value arg:trackable_parent arguments arg arg arg arg arg FunctionDef name:_call_next_creator_renaming_initializer arg:initializer arguments arg arg Call Return return:yes Call If BoolOp Compare Call Assign Call If Return return:yes Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "is_initialized", + "source_code": "def is_initialized(self, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.is_initialized()\n if self._use_packed_variable():\n return self._packed_var.is_initialized()\n result = self._primary.is_initialized()\n for v in self._values[1:-1]:\n result = math_ops.logical_and(result, v.is_initialized())\n result = math_ops.logical_and(result, self._values[-1].is_initialized(), name=name)\n return result", + "docstring": "Identifies if all the component variables are initialized. Args: name: Name of the final op. Returns: The op that evaluates to True or False depending on if all the component variables are initialized.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:is_initialized arg:self arg:name arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Assign Call For Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_rfft", + "source_code": "def _rfft(input_tensor, fft_length=None, name=None):\n with _ops.name_scope(name, default_name, [input_tensor, fft_length]) as name:\n input_tensor = _ops.convert_to_tensor(input_tensor, preferred_dtype=_dtypes.float32)\n if input_tensor.dtype not in (_dtypes.float32, _dtypes.float64):\n raise ValueError('RFFT requires tf.float32 or tf.float64 inputs, got: %s' % input_tensor)\n real_dtype = input_tensor.dtype\n if real_dtype == _dtypes.float32:\n complex_dtype = _dtypes.complex64\n else:\n assert real_dtype == _dtypes.float64\n complex_dtype = _dtypes.complex128\n input_tensor.shape.with_rank_at_least(fft_rank)\n if fft_length is None:\n fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)\n else:\n fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)\n input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)\n fft_length_static = _tensor_util.constant_value(fft_length)\n if fft_length_static is not None:\n fft_length = fft_length_static\n return fft_fn(input_tensor, fft_length, Tcomplex=complex_dtype, name=name)", + "docstring": "Wrapper around gen_spectral_ops.rfft* that infers fft_length argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py", + "ast_data": "FunctionDef name:_rfft arg:input_tensor arg:fft_length arg:name arguments arg arg arg With Call Assign Call If Compare Raise Call Assign If Compare Assign Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "order_by", + "source_code": "def order_by(self, attribute):\n self._options['order_by'] = attribute\n return self", + "docstring": "Order the displayed profiler nodes based on a attribute. Supported attribute includes micros, bytes, occurrence, params, etc. Args: attribute: An attribute the profiler node has. 
Returns: self", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py", + "ast_data": "FunctionDef name:order_by arg:self arg:attribute arguments arg arg Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_bbox", + "source_code": "def get_bbox(self):\n bbox = Bbox([[0, 0], [0, 0]])\n bbox.update_from_data_xy(self.get_xydata())\n return bbox", + "docstring": "Get the bounding box of this line.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:get_bbox arg:self arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_label_minor", + "source_code": "def set_label_minor(self, labelOnlyBase):\n self.labelOnlyBase = labelOnlyBase", + "docstring": "Switch minor tick labeling on or off. Parameters ---------- labelOnlyBase : bool If True, label ticks only at integer powers of base.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:set_label_minor arg:self arg:labelOnlyBase arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_replace_tensors_for_gradient", + "source_code": "def _replace_tensors_for_gradient(x, grad):\n if not isinstance(x, composite_tensor.CompositeTensor):\n return grad\n if not isinstance(x, CompositeTensorGradientProtocol):\n raise ValueError(f'Type {type(x).__name__} is not supported as a gradient source.')\n composite_gradient = x.__composite_gradient__\n x_components = composite_gradient.get_gradient_components(x)\n if x_components is x:\n grad_components = grad\n else:\n grad_components = nest.map_structure_up_to(x_components, _replace_tensors_for_gradient, x_components, grad)\n if grad_components is None:\n return None\n return composite_gradient.replace_gradient_components(x, grad_components)", + "docstring": "Replaces the tensors in that should be differentiated with . Args: x: A or . grad: A nested structure of , with the same structure as the value returned by . Returns: A or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py", + "ast_data": "FunctionDef name:_replace_tensors_for_gradient arg:x arg:grad arguments arg arg If Call Return return:yes If Call Raise Call Call Assign Assign Call If Compare Assign Assign Call If Compare Return return:no Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_all_registered_ops", + "source_code": "def _all_registered_ops(self) -> set[str]:\n return {op_name_class.qualified_name() for op_name_class in self._registry.keys()}", + "docstring": "Returns the set of all registered function names.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py", + "ast_data": "FunctionDef name:_all_registered_ops arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_serialize_to_proto", + "source_code": "def _serialize_to_proto(self, object_proto=None, **kwargs):\n del object_proto, kwargs\n return None", + "docstring": "Returns a proto of any type to be saved into the SavedModel. Trackable classes decorated with should overwrite this method to save metadata for this object to the SavedModel. The proto returned by this function will be passed to in the form of a proto. This data is only saved and used by the Python API. Existing C++ loading APIs such as will not read this field at all. 
Args: object_proto: A proto that may be filled by this function. Only the core serializable types (Variable, Function, Constant, Asset) should modify this argument. **kwargs: Future keyword arguments passed to the object during saving. Returns: A proto that serializes this class's type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py", + "ast_data": "FunctionDef name:_serialize_to_proto arg:self arg:object_proto arguments arg arg arg Return return:no" + }, + { + "library": "numpy", + "name": "DJBFFTNotFoundError", + "source_code": "class DJBFFTNotFoundError(NotFoundError):\n pass", + "docstring": "DJBFFT ( libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [djbfft]) or by setting the DJBFFT environment variable.", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "ClassDef name:DJBFFTNotFoundError" + }, + { + "library": "pytorch", + "name": "_cast", + "source_code": "def _cast(param, value, param_id=None, param_groups=None, key=None):\n if isinstance(value, torch.Tensor):\n return Optimizer._process_value_according_to_param_policy(param, value, param_id, param_groups, key)\n elif isinstance(value, dict):\n return {k: _cast(param, v, param_id=param_id, param_groups=param_groups, key=k) for k, v in value.items()}\n elif isinstance(value, Iterable):\n return type(value)((_cast(param, v, param_id=param_id, param_groups=param_groups) for v in value))\n else:\n return value", + "docstring": "Make a deep copy of value, casting all tensors to device of param.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\optimizer.py", + "ast_data": "FunctionDef name:_cast arg:param arg:value arg:param_id arg:param_groups arg:key arguments arg arg arg arg arg If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_shape_common", + "source_code": "def _shape_common(s1, s2):\n s1 = tensor_shape.TensorShape(s1)\n s2 = tensor_shape.TensorShape(s2)\n if s1.ndims is None or s2.ndims is None or s1.ndims != s2.ndims:\n return tensor_shape.unknown_shape()\n d = [d1 if d1 is not None and d1 == d2 else None for d1, d2 in zip(s1.as_list(), s2.as_list())]\n return tensor_shape.TensorShape(d)", + "docstring": "The greatest lower bound (ordered by specificity) TensorShape.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:_shape_common arg:s1 arg:s2 arguments arg arg Assign Call Assign Call If BoolOp Compare Compare Compare Return return:yes Call Assign BoolOp Compare Compare Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "node_def", + "source_code": "@property\ndef node_def(self):\n return self._node_def", + "docstring": "The proto representing the op that failed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "FunctionDef name:node_def arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "bucketize", + "source_code": "def bucketize(self, values: CSEVariable, boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: CSEVariable, indexing_dtype: torch.dtype, right: bool, sorter: Optional[tuple[str, sympy.Expr]]=None, sorter_indices: Optional[CSEVariable]=None) -> CSEVariable:\n raise NotImplementedError", + 
"docstring": "See [Note: Inductor bucketize op]", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py", + "ast_data": "FunctionDef name:bucketize arg:self arg:values arg:boundaries arg:boundary_indices arg:indexing_dtype arg:right arg:sorter arg:sorter_indices arguments arg arg arg arg arg arg arg arg Raise" + }, + { + "library": "kornia", + "name": "find_essential", + "source_code": "def find_essential(points1: torch.Tensor, points2: torch.Tensor, weights: Optional[torch.Tensor]=None) -> torch.Tensor:\n E = run_5point(points1, points2, weights).to(points1.dtype)\n return E", + "docstring": "Find essential matrices. Args: points1: A set of points in the first image with a tensor shape :math:. points2: A set of points in the second image with a tensor shape :math:. weights: Tensor containing the weights per point correspondence with a shape of :math:. Returns: the computed essential matrices with shape :math:. Note that all possible solutions are returned, i.e., 10 essential matrices for each image pair. To choose the best one out of 10, try to check the one with the lowest Sampson distance.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\essential.py", + "ast_data": "FunctionDef name:find_essential arg:points1 arg:points2 arg:weights arguments arg arg arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "close", + "source_code": "def close(self):\n if self._session and (not self._closed):\n self._closed = True\n tf_session.TF_CloseSession(self._session)", + "docstring": "Closes this session. Calling this method frees all resources associated with the session. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while closing the TensorFlow session.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:close arg:self arguments arg If BoolOp Assign Call" + }, + { + "library": "sphinx", + "name": "pending_xref", + "source_code": "class pending_xref(nodes.Inline, nodes.Element):\n child_text_separator = ''", + "docstring": "Node for cross-references that cannot be resolved without complete information about all documents. 
These nodes are resolved before writing output, in BuildEnvironment.resolve_references.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:pending_xref Assign" + }, + { + "library": "matplotlib", + "name": "MaxExtent", + "source_code": "class MaxExtent(_Base):\n\n def __init__(self, artist_list, w_or_h):\n self._artist_list = artist_list\n _api.check_in_list(['width', 'height'], w_or_h=w_or_h)\n self._w_or_h = w_or_h\n\n def add_artist(self, a):\n self._artist_list.append(a)\n\n def get_size(self, renderer):\n rel_size = 0.0\n extent_list = [getattr(a.get_window_extent(renderer), self._w_or_h) / a.figure.dpi for a in self._artist_list]\n abs_size = max(extent_list, default=0)\n return (rel_size, abs_size)", + "docstring": "Size whose absolute part is either the largest width or the largest height of the given *artist_list*.", + "type": "class", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py", + "ast_data": "ClassDef name:MaxExtent FunctionDef name:__init__ arg:self arg:artist_list arg:w_or_h arguments arg arg arg Assign Call Assign FunctionDef name:add_artist arg:self arg:a arguments arg arg Call FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Assign Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_approximate_basis", + "source_code": "def get_approximate_basis(A: Tensor, q: int, niter: Optional[int]=2, M: Optional[Tensor]=None) -> Tensor:\n niter = 2 if niter is None else niter\n dtype = _utils.get_floating_dtype(A) if not A.is_complex() else A.dtype\n matmul = _utils.matmul\n R = torch.randn(A.shape[-1], q, dtype=dtype, device=A.device)\n X = matmul(A, R)\n if M is not None:\n X = X - matmul(M, R)\n Q = torch.linalg.qr(X).Q\n for _ in range(niter):\n X = matmul(A.mH, Q)\n if M is not None:\n X = X - matmul(M.mH, Q)\n Q = torch.linalg.qr(X).Q\n X = matmul(A, Q)\n if M is not None:\n X = X - matmul(M, Q)\n Q = torch.linalg.qr(X).Q\n return Q", + "docstring": "Return tensor :math: with :math: orthonormal columns such that :math: approximates :math:. If :math: is specified, then :math: is such that :math: approximates :math:. without instantiating any tensors of the size of :math: or :math:. .. note:: The implementation is based on the Algorithm 4.4 from Halko et al., 2009. .. note:: For an adequate approximation of a k-rank matrix :math:, where k is not known in advance but could be estimated, the number of :math: columns, q, can be choosen according to the following criteria: in general, :math:_).", + "type": "function", + "file_path": "pytorch\\torch\\_lowrank.py", + "ast_data": "FunctionDef name:get_approximate_basis arg:A arg:q arg:niter arg:M arguments arg arg arg arg Assign Compare Assign Call Call Assign Assign Call Assign Call If Compare Assign Call Assign Call For Call Assign Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "handle", + "source_code": "def handle(self, args, kwargs):\n return self.NOT_SUPPORTED", + "docstring": "Handle this dispatcher's operation with the specified arguments. If this operation dispatcher can handle the given arguments, then return an appropriate value (or raise an appropriate exception). Args: args: The arguments to the operation. kwargs: They keyword arguments to the operation. 
Returns: The result of the operation, or if this dispatcher can not handle the given arguments.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", + "ast_data": "FunctionDef name:handle arg:self arg:args arg:kwargs arguments arg arg arg Return return:yes" + }, + { + "library": "numpy", + "name": "check_embedded_msvcr_match_linked", + "source_code": "def check_embedded_msvcr_match_linked(msver):\n maj = msvc_runtime_major()\n if maj:\n if not maj == int(msver):\n raise ValueError('Discrepancy between linked msvcr (%d) and the one about to be embedded (%d)' % (int(msver), maj))", + "docstring": "msver is the ms runtime version used for the MANIFEST.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\mingw32ccompiler.py", + "ast_data": "FunctionDef name:check_embedded_msvcr_match_linked arg:msver arguments arg Assign Call If If Compare Call Raise Call Call" + }, + { + "library": "kornia", + "name": "PatchEmbed", + "source_code": "class PatchEmbed(Module):\n\n def __init__(self, kernel_size: tuple[int, int]=(16, 16), stride: tuple[int, int]=(16, 16), padding: tuple[int, int]=(0, 0), in_chans: int=3, embed_dim: int=768) -> None:\n super().__init__()\n self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.proj(x)\n x = x.permute(0, 2, 3, 1)\n return x", + "docstring": "Image to Patch Embedding.", + "type": "class", + "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\image_encoder.py", + "ast_data": "ClassDef name:PatchEmbed FunctionDef name:__init__ arg:self arg:kernel_size arg:stride arg:padding arg:in_chans arg:embed_dim arguments arg arg arg arg arg arg Call Call Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_precision", + "source_code": "def get_precision(self):\n if self.store_precision:\n precision = self.precision_\n else:\n precision = linalg.pinvh(self.covariance_, check_finite=False)\n return precision", + "docstring": "Getter for the precision matrix. Returns ------- precision_ : array-like of shape (n_features, n_features) The precision matrix associated to the current covariance object.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py", + "ast_data": "FunctionDef name:get_precision arg:self arguments arg If Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_validate_pruning_dim", + "source_code": "def _validate_pruning_dim(t, dim):\n if dim >= t.dim():\n raise IndexError(f'Invalid index {dim} for tensor of size {t.shape}')", + "docstring": "Validate that the pruning dimension is within the bounds of the tensor dimension. Args: t (torch.Tensor): tensor representing the parameter to prune dim (int): index of the dim along which we define channels to prune", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:_validate_pruning_dim arg:t arg:dim arguments arg arg If Compare Call Raise Call" + }, + { + "library": "pandas", + "name": "len", + "source_code": "def len(self) -> Series:\n from pandas import Series\n value_lengths = pc.list_value_length(self._pa_array)\n return Series(value_lengths, dtype=ArrowDtype(value_lengths.type), index=self._data.index, name=self._data.name)", + "docstring": "Return the length of each list in the Series. 
Returns ------- pandas.Series The length of each list. See Also -------- str.len : Python built-in function returning the length of an object. Series.size : Returns the length of the Series. StringMethods.len : Compute the length of each element in the Series/Index. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... [1, 2, 3], ... [3], ... ], ... dtype=pd.ArrowDtype(pa.list_(pa.int64())), ... ) >>> s.list.len() 0 3 1 1 dtype: int32[pyarrow]", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\accessors.py", + "ast_data": "FunctionDef name:len arg:self arguments arg Assign Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "__lt__", + "source_code": "def __lt__(self, other_node):\n return self.split_info.gain > other_node.split_info.gain", + "docstring": "Comparison for priority queue. Nodes with high gain are higher priority than nodes with low gain. heapq.heappush only need the '<' operator. heapq.heappop take the smallest item first (smaller is higher priority). Parameters ---------- other_node : TreeNode The node to compare with.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py", + "ast_data": "FunctionDef name:__lt__ arg:self arg:other_node arguments arg arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "is_generic_mapping", + "source_code": "def is_generic_mapping(tp):\n return tp not in (collections.abc.Mapping, typing.Mapping) and getattr(tp, '__origin__', None) in (collections.abc.Mapping, typing.Mapping)", + "docstring": "Returns true if is a parameterized typing.Mapping value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\type_annotations.py", + "ast_data": "FunctionDef name:is_generic_mapping arg:tp arguments arg Return return:yes BoolOp Compare Compare Call" + }, + { + "library": "scipy", + "name": "qz", + "source_code": "@_apply_over_batch(('A', 2), ('B', 2))\ndef qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False, overwrite_b=False, check_finite=True):\n result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort, overwrite_a=overwrite_a, overwrite_b=overwrite_b, check_finite=check_finite)\n return (result[0], result[1], result[-4], result[-3])", + "docstring": "QZ decomposition for generalized eigenvalues of a pair of matrices. The QZ, or generalized Schur, decomposition for a pair of n-by-n matrices (A,B) is:: (A,B) = (Q @ AA @ Z*, Q @ BB @ Z*) where AA, BB is in generalized Schur form if BB is upper-triangular with non-negative diagonal and AA is upper-triangular, or for real QZ decomposition (`AB` in the following expressions to verify the decomposition. 
>>> Q @ AA @ Z.conj().T # Should be A array([[ 1.-0.j, 2.-0.j, -1.-0.j], [ 5.+0.j, 5.+0.j, 5.-0.j], [ 2.+0.j, 4.+0.j, -8.+0.j]]) >>> Q @ BB @ Z.conj().T # Should be B array([[ 1.+0.j, 1.+0.j, -3.+0.j], [ 3.-0.j, 1.-0.j, -1.+0.j], [ 5.+0.j, 6.+0.j, -2.+0.j]])", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_decomp_qz.py", + "ast_data": "FunctionDef name:qz arg:A arg:B arg:output arg:lwork arg:sort arg:overwrite_a arg:overwrite_b arg:check_finite arguments arg arg arg arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_clang_major_version", + "source_code": "def _get_clang_major_version(path_to_clang: str) -> int:\n logging.info('Running echo __clang_major__ | %s -E -P -', path_to_clang)\n clang_version_proc = subprocess.run([path_to_clang, '-E', '-P', '-'], input='__clang_major__', check=True, capture_output=True, text=True)\n major_version = int(clang_version_proc.stdout)\n logging.info('%s reports major version %s.', path_to_clang, major_version)\n return major_version", + "docstring": "Gets the major version of the clang at . Args: path_to_clang: Path to a clang executable Returns: The major version.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\build_tools\\configure\\configure.py", + "ast_data": "FunctionDef name:_get_clang_major_version arg:path_to_clang arguments arg Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "hex", + "source_code": "def hex(self) -> str:\n return self.node.guard_float('', 0).hex()", + "docstring": "Returns the hexadecimal representation of the float.", + "type": "method", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:hex arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "MatchingFilesDataset", + "source_code": "class MatchingFilesDataset(dataset_ops.DatasetSource):\n\n def __init__(self, patterns):\n self._patterns = ops.convert_to_tensor(patterns, dtype=dtypes.string, name='patterns')\n variant_tensor = ged_ops.matching_files_dataset(self._patterns)\n super(MatchingFilesDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return tensor_spec.TensorSpec([], dtypes.string)", + "docstring": "A that list the files according to the input patterns.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\matching_files.py", + "ast_data": "ClassDef name:MatchingFilesDataset FunctionDef name:__init__ arg:self arg:patterns arguments arg arg Assign Call Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "tocoo", + "source_code": "def tocoo(self, copy=True):\n M, N = self.shape\n R, C = self.blocksize\n indptr_diff = np.diff(self.indptr)\n if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:\n indptr_diff_limited = indptr_diff.astype(np.intp)\n if np.any(indptr_diff_limited != indptr_diff):\n raise ValueError('Matrix too big to convert')\n indptr_diff = indptr_diff_limited\n idx_dtype = self._get_index_dtype(maxval=max(M, N))\n row = (R * np.arange(M // R, dtype=idx_dtype)).repeat(indptr_diff)\n row = row.repeat(R * C).reshape(-1, R, C)\n row += np.tile(np.arange(R, dtype=idx_dtype).reshape(-1, 1), (1, C))\n row = row.reshape(-1)\n col = (C * self.indices).astype(idx_dtype, copy=False).repeat(R * C).reshape(-1, R, C)\n col += np.tile(np.arange(C, dtype=idx_dtype), (R, 1))\n col = 
col.reshape(-1)\n data = self.data.reshape(-1)\n if copy:\n data = data.copy()\n return self._coo_container((data, (row, col)), shape=self.shape)", + "docstring": "Convert this array/matrix to COOrdinate format. When copy=False the data array will be shared between this array/matrix and the resultant coo_array/coo_matrix.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_bsr.py", + "ast_data": "FunctionDef name:tocoo arg:self arg:copy arguments arg arg Assign Assign Assign Call If Compare Call Assign Call If Call Compare Raise Call Assign Assign Call Call Assign Call Call Assign Call Call Call Call Call Assign Call Assign Call Call Call Call Call Assign Call Assign Call If Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "sawtooth", + "source_code": "def sawtooth(t, width=1):\n t, w = (asarray(t), asarray(width))\n w = asarray(w + (t - t))\n t = asarray(t + (w - w))\n y = zeros(t.shape, dtype='d')\n mask1 = (w > 1) | (w < 0)\n place(y, mask1, nan)\n tmod = mod(t, 2 * pi)\n mask2 = 1 - mask1 & (tmod < w * 2 * pi)\n tsub = extract(mask2, tmod)\n wsub = extract(mask2, w)\n place(y, mask2, tsub / (pi * wsub) - 1)\n mask3 = 1 - mask1 & 1 - mask2\n tsub = extract(mask3, tmod)\n wsub = extract(mask3, w)\n place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))\n return y", + "docstring": "Return a periodic sawtooth or triangle waveform. The sawtooth waveform has a period `widthwidth` = 0.5 produces a triangle wave. If an array, causes wave shape to change over time, and must be the same length as t. Returns ------- y : ndarray Output array containing the sawtooth waveform. Examples -------- A 5 Hz waveform sampled at 500 Hz for 1 second: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(0, 1, 500) >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_waveforms.py", + "ast_data": "FunctionDef name:sawtooth arg:t arg:width arguments arg arg Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare Call Assign Call Assign Compare Assign Call Assign Call Call Assign Assign Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "User", + "source_code": "class User(AbstractUser):\n\n class Meta(AbstractUser.Meta):\n swappable = 'AUTH_USER_MODEL'", + "docstring": "Users within the Django authentication system are represented by this model. Username and password are required. Other fields are optional.", + "type": "class", + "file_path": "django\\django\\contrib\\auth\\models.py", + "ast_data": "ClassDef name:User ClassDef name:Meta Assign" + }, + { + "library": "pandas", + "name": "flags", + "source_code": "@final\n@property\ndef flags(self) -> Flags:\n return self._flags", + "docstring": "Get the properties associated with this pandas object. The available flags are * :attr: See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- \"Flags\" differ from \"metadata\". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:. 
Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2]}) >>> df.flags Flags can be get or set using `` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags[\"allows_duplicate_labels\"] False >>> df.flags[\"allows_duplicate_labels\"] = True", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:flags arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, limit=100, history_file_path=None):\n self._commands = []\n self._limit = limit\n self._history_file_path = history_file_path or self._get_default_history_file_path()\n self._load_history_from_file()", + "docstring": "CommandHistory constructor. Args: limit: Maximum number of the most recent commands that this instance keeps track of, as an int. history_file_path: (str) Manually specified path to history file. Used in testing.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:limit arg:history_file_path arguments arg arg arg Assign Assign Assign BoolOp Call Call" + }, + { + "library": "pytorch", + "name": "isValueType", + "source_code": "def isValueType(typ: CType, properties: LazyIrProperties | None=None) -> bool:\n if isinstance(typ, BaseCType):\n treat_scalars_as_constants = properties and properties.TreatScalarsAsConstants\n return typ.type == getValueT() or (typ.type == scalarT and (not treat_scalars_as_constants)) or typ.type == SymIntT\n elif typ == VectorCType(BaseCType(SymIntT)):\n return False\n elif isinstance(typ, (OptionalCType, ListCType, VectorCType)):\n return isValueType(typ.elem, properties)\n return False", + "docstring": "Given a type, determine if it is a Value-like type. This is equivalent to being Tensor-like, but assumes the type has already been transformed.", + "type": "function", + "file_path": "pytorch\\torchgen\\api\\lazy.py", + "ast_data": "FunctionDef name:isValueType arg:typ arg:properties arguments arg arg If Call Assign BoolOp Return return:yes BoolOp Compare Call BoolOp Compare Compare If Compare Call Call Return return:yes If Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_unflatten_optim_state", + "source_code": "def _unflatten_optim_state(fsdp_param_info: FSDPParamInfo, flat_param_state: dict[str, Any], to_save: bool, shard_state: bool, cpu_offload: bool) -> list[dict[str, Any]]:\n assert not shard_state or to_save, 'If ``shard_state`` is True, ``to_save`` has to be True.'\n consolidated_state = _communicate_optim_state(fsdp_param_info, flat_param_state)\n if to_save:\n unflat_param_state = _unflatten_communicated_optim_state(fsdp_param_info, consolidated_state, shard_state)\n for optim_state in unflat_param_state:\n if cpu_offload:\n for key in list(optim_state.keys()):\n state = optim_state[key]\n if not isinstance(state, torch.Tensor):\n continue\n optim_state[key] = state.cpu()\n return unflat_param_state\n else:\n return []", + "docstring": "Unflattens the optimizer state, consisting of the \"state\" part and the \"param_groups\" part. Unflattening the \"state\" part involves consolidating the state on the target rank and remapping from flattened to unflattened parameter IDs, and the \"param_groups\" part only involves remapping from flattened to unflattened parameter IDs. 
Args: fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a mapping from FQN to original parameter index. flat_param_state (Dict[str, Any]): Entry for the flat parameter in the \"state\" part of the optimizer state dict. to_save (bool): Whether to save the state on this rank. Returns: List[Dict[str, Any]]: A :class: holding the entries in the \"state\" part of the optimizer state dict corresponding to the unflattened parameters comprising the flat parameter if on the target rank or an empty :class: otherwise. The final optimizer state dict will need to map these entries using the proper unflattened parameter IDs.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py", + "ast_data": "FunctionDef name:_unflatten_optim_state arg:fsdp_param_info arg:flat_param_state arg:to_save arg:shard_state arg:cpu_offload arguments arg arg arg arg arg BoolOp Assign Call If Assign Call For If For Call Call Assign If Call Assign Call Return return:yes Return return:no" + }, + { + "library": "scikit-learn", + "name": "Sum", + "source_code": "class Sum(KernelOperator):\n\n def __call__(self, X, Y=None, eval_gradient=False):\n if eval_gradient:\n K1, K1_gradient = self.k1(X, Y, eval_gradient=True)\n K2, K2_gradient = self.k2(X, Y, eval_gradient=True)\n return (K1 + K2, np.dstack((K1_gradient, K2_gradient)))\n else:\n return self.k1(X, Y) + self.k2(X, Y)\n\n def diag(self, X):\n return self.k1.diag(X) + self.k2.diag(X)\n\n def __repr__(self):\n return '{0} + {1}'.format(self.k1, self.k2)", + "docstring": "The kernel takes two kernels :math: and :math: and combines them via .. math:: k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y) Note that the magic method is overridden, so is equivalent to using the + operator with . Read more in the :ref:. .. versionadded:: 0.18 Parameters ---------- k1 : Kernel The first base-kernel of the sum-kernel k2 : Kernel The second base-kernel of the sum-kernel Examples -------- >>> from sklearn.datasets import make_friedman2 >>> from sklearn.gaussian_process import GaussianProcessRegressor >>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) >>> kernel = Sum(ConstantKernel(2), RBF()) >>> gpr = GaussianProcessRegressor(kernel=kernel, ... 
random_state=0).fit(X, y) >>> gpr.score(X, y) 1.0 >>> kernel 1.41**2 + RBF(length_scale=1)", + "type": "class", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "ClassDef name:Sum FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg If Assign Call Assign Call Return return:yes Call Return return:yes Call Call FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_aot_graph_name", + "source_code": "def get_aot_graph_name() -> str:\n global model_name, graph_being_compiled, nth_graph\n return f'{model_name}__{'_'.join(graph_being_compiled)}_{nth_graph}'", + "docstring": "Returns the name of the graph being compiled.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\logging_utils.py", + "ast_data": "FunctionDef name:get_aot_graph_name arguments Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "inverse_transform", + "source_code": "def inverse_transform(self, X):\n check_is_fitted(self)\n xp, _ = get_namespace(X)\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite='allow-nan')\n if sparse.issparse(X):\n inplace_column_scale(X, self.scale_)\n else:\n X *= self.scale_\n return X", + "docstring": "Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data that should be transformed back. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Call If Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "run", + "source_code": "def run(self):\n env = self.environ.get\n local = httputil.Host('', int(env('SERVER_PORT', 80) or -1), env('SERVER_NAME', ''))\n remote = httputil.Host(env('REMOTE_ADDR', ''), int(env('REMOTE_PORT', -1) or -1), env('REMOTE_HOST', ''))\n scheme = env('wsgi.url_scheme')\n sproto = env('ACTUAL_SERVER_PROTOCOL', 'HTTP/1.1')\n request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)\n request.login = env('LOGON_USER') or env('REMOTE_USER') or None\n request.multithread = self.environ['wsgi.multithread']\n request.multiprocess = self.environ['wsgi.multiprocess']\n request.wsgi_environ = self.environ\n request.prev = env('cherrypy.previous_request', None)\n meth = self.environ['REQUEST_METHOD']\n path = httputil.urljoin(self.environ.get('SCRIPT_NAME', ''), self.environ.get('PATH_INFO', ''))\n qs = self.environ.get('QUERY_STRING', '')\n path, qs = self.recode_path_qs(path, qs) or (path, qs)\n rproto = self.environ.get('SERVER_PROTOCOL')\n headers = self.translate_headers(self.environ)\n rfile = self.environ['wsgi.input']\n request.run(meth, path, qs, rproto, headers, rfile)", + "docstring": "Create a Request object using environ.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpwsgi.py", + "ast_data": "FunctionDef name:run arg:self arguments arg Assign Assign Call Call BoolOp Call Call Assign Call Call Call BoolOp Call Call Assign Call Assign Call Assign Call Assign BoolOp Call Call Assign Assign Assign Assign Call Assign Assign Call 
Call Call Assign Call Assign BoolOp Call Assign Call Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "replace_with", + "source_code": "def replace_with(self, new_node: torch.fx.Node) -> None:\n graph = new_node.graph\n if len(self.nodes) == 1:\n mm_node = self.nodes[0]\n assert mm_node.target in (aten.mm.default, aten._scaled_mm.default)\n mm_node.replace_all_uses_with(new_node)\n graph.erase_node(mm_node)\n return\n graph = new_node.graph\n assert len(self.nodes) == 3\n mm_node = self.nodes[1]\n output_reshape_node = self.nodes[2]\n assert mm_node.target in (aten.mm.default, aten._scaled_mm.default)\n assert output_reshape_node.target == aten.reshape.default\n output_reshape_node.replace_all_uses_with(new_node)\n if len(mm_node.users) > 1:\n with graph.inserting_after(new_node):\n new_mm_node = graph.call_function(aten.reshape.default, args=(new_node, list(_get_tensor(mm_node).shape)))\n mm_node.replace_all_uses_with(new_mm_node)", + "docstring": "Replace the matmul with the new node.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py", + "ast_data": "FunctionDef name:replace_with arg:self arg:new_node arguments arg arg Assign If Compare Call Assign Compare Call Call Return return:no Assign Compare Call Assign Assign Compare Compare Call If Compare Call With Call Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_fftn", + "source_code": "def _fftn(input_tensor, fft_length=None, axes=None, norm=None, name=None):\n with _ops.name_scope(name, default_name, [input_tensor, fft_length, axes]) as name:\n axes = _process_empty_axes(input_tensor, axes)\n fft_rank = axes.shape[0]\n input_tensor = _ops.convert_to_tensor(input_tensor, preferred_dtype=_dtypes.complex64)\n input_tensor.shape.with_rank_at_least(fft_rank)\n if fft_length is None:\n fft_length = _infer_fft_length_for_fftn(input_tensor)\n else:\n fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)\n input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)\n fft_length_static = _tensor_util.constant_value(fft_length)\n if fft_length_static is not None:\n fft_length = fft_length_static\n if norm is None:\n norm = 'backward'\n n = 1\n if norm != 'backward':\n for fft_length_i in fft_length:\n n *= fft_length_i\n if norm == 'forward':\n input_tensor /= n\n elif norm == 'ortho':\n input_tensor /= np.sqrt(n)\n return fft_n(input_tensor, fft_length, axes, name=name)", + "docstring": "Wrapper around gen_spectral_ops.*fft that infers fft_length and axes arguments.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py", + "ast_data": "FunctionDef name:_fftn arg:input_tensor arg:fft_length arg:axes arg:norm arg:name arguments arg arg arg arg arg With Call Assign Call Assign Assign Call Call If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Assign If Compare For If Compare If Compare Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "visit", + "source_code": "@classmethod\ndef visit(cls, fn: Callable[['VariableTracker'], None], value: Any, cache: Optional[dict[int, Any]]=None) -> None:\n if cache is None:\n cache = {}\n idx = id(value)\n if idx in cache:\n return\n cache[idx] = value\n if isinstance(value, VariableTracker):\n value = value.unwrap()\n fn(value)\n value = value.unwrap()\n nonvars = value._nonvar_fields\n for key, subvalue in value.__dict__.items():\n if key not in nonvars:\n cls.visit(fn, subvalue, cache)\n elif 
istype(value, (list, tuple)):\n for subvalue in value:\n cls.visit(fn, subvalue, cache)\n elif istype(value, (dict, collections.OrderedDict)):\n for subvalue in value.values():\n cls.visit(fn, subvalue, cache)", + "docstring": "Walk value and call fn on all the VariableTracker instances", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py", + "ast_data": "FunctionDef name:visit arg:cls arg:fn arg:value arg:cache arguments arg arg arg arg If Compare Assign Assign Call If Compare Return return:no Assign If Call Assign Call Call Assign Call Assign For Call If Compare Call If Call For Call If Call For Call Call" + }, + { + "library": "tensorflow", + "name": "log_softmax", + "source_code": "@tf_export(v1=['nn.log_softmax', 'math.log_softmax'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None, 'dim is deprecated, use axis instead', 'dim')\ndef log_softmax(logits, axis=None, name=None, dim=None):\n axis = deprecation.deprecated_argument_lookup('axis', axis, 'dim', dim)\n if axis is None:\n axis = -1\n return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)", + "docstring": "Computes log softmax activations. For each batch and class we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty . Must be one of the following types: , , . axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). dim: Deprecated alias for . Returns: A . Has the same type as . Same shape as . Raises: InvalidArgumentError: if is empty or is beyond the last dimension of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:log_softmax arg:logits arg:axis arg:name arg:dim arguments arg arg arg arg Assign Call If Compare Assign Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "median_abs_deviation", + "source_code": "def median_abs_deviation(x, axis=0, center=np.median, scale=1.0, nan_policy='propagate'):\n if not callable(center):\n raise TypeError(f\"The argument 'center' must be callable. The given value {repr(center)} is not callable.\")\n if isinstance(scale, str):\n if scale.lower() == 'normal':\n scale = 0.6744897501960817\n else:\n raise ValueError(f'{scale} is not a valid scale value.')\n x = asarray(x)\n if not x.size:\n if axis is None:\n return np.nan\n nan_shape = tuple((item for i, item in enumerate(x.shape) if i != axis))\n if nan_shape == ():\n return np.nan\n return np.full(nan_shape, np.nan)\n contains_nan = _contains_nan(x, nan_policy)\n if contains_nan:\n if axis is None:\n mad = _mad_1d(x.ravel(), center, nan_policy)\n else:\n mad = np.apply_along_axis(_mad_1d, axis, x, center, nan_policy)\n elif axis is None:\n med = center(x, axis=None)\n mad = np.median(np.abs(x - med))\n else:\n med = np.expand_dims(center(x, axis=axis), axis)\n mad = np.median(np.abs(x - med), axis=axis)\n return mad / scale", + "docstring": "Compute the median absolute deviation of the data along the given axis. The median absolute deviation (MAD, [1]_) computes the median over the absolute deviations from the median. It is a measure of dispersion similar to the standard deviation but more robust to outliers [2]_. 
The MAD of an empty array is `scalexaxiscenterinfcenterinfnanmedian_abs_deviation`, the latter is affected when we change a single value of an array to have an outlier value while the MAD hardly changes: >>> import numpy as np >>> from scipy import stats >>> x = stats.norm.rvs(size=100, scale=1, random_state=123456) >>> x.std() 0.9973906394005013 >>> stats.median_abs_deviation(x) 0.82832610097857 >>> x[0] = 345.6 >>> x.std() 34.42304872314415 >>> stats.median_abs_deviation(x) 0.8323442311590675 Axis handling example: >>> x = np.array([[10, 7, 4], [3, 2, 1]]) >>> x array([[10, 7, 4], [ 3, 2, 1]]) >>> stats.median_abs_deviation(x) array([3.5, 2.5, 1.5]) >>> stats.median_abs_deviation(x, axis=None) 2.0 Scale normal example: >>> x = stats.norm.rvs(size=1000000, scale=2, random_state=123456) >>> stats.median_abs_deviation(x) 1.3487398527041636 >>> stats.median_abs_deviation(x, scale='normal') 1.9996446978061115", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:median_abs_deviation arg:x arg:axis arg:center arg:scale arg:nan_policy arguments arg arg arg arg arg If Call Raise Call Call If Call If Compare Call Assign Raise Call Assign Call If If Compare Return return:yes Assign Call Call Compare If Compare Return return:yes Return return:yes Call Assign Call If If Compare Assign Call Call Assign Call If Compare Assign Call Assign Call Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "dissoc", + "source_code": "def dissoc(d, *keys, **kwargs):\n factory = _get_factory(dissoc, kwargs)\n d2 = factory()\n if len(keys) < len(d) * 0.6:\n d2.update(d)\n for key in keys:\n if key in d2:\n del d2[key]\n else:\n remaining = set(d)\n remaining.difference_update(keys)\n for k in remaining:\n d2[k] = d[k]\n return d2", + "docstring": "Return a new dict with the given key(s) removed. New dict has d[key] deleted for each supplied key. Does not modify the initial dictionary. >>> dissoc({\"x\": 1, \"y\": 2}, \"y\") {'x': 1} >>> dissoc({\"x\": 1, \"y\": 2}, \"y\", \"x\") {} >>> dissoc({\"x\": 1}, \"y\") # Ignores missing keys {'x': 1}", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py", + "ast_data": "FunctionDef name:dissoc arg:d arguments arg arg arg Assign Call Assign Call If Compare Call Call Call For If Compare Assign Call Call For Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_safe_assign", + "source_code": "def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):\n row_indexer = slice(None, None, None) if row_indexer is None else row_indexer\n column_indexer = slice(None, None, None) if column_indexer is None else column_indexer\n if hasattr(X, 'iloc'):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FutureWarning)\n X.iloc[row_indexer, column_indexer] = values\n else:\n X[row_indexer, column_indexer] = values", + "docstring": "Safe assignment to a numpy array, sparse matrix, or pandas dataframe. Parameters ---------- X : {ndarray, sparse-matrix, dataframe} Array to be modified. It is expected to be 2-dimensional. values : ndarray The values to be assigned to . row_indexer : array-like, dtype={int, bool}, default=None A 1-dimensional array to select the rows of interest. If , all rows are selected. column_indexer : array-like, dtype={int, bool}, default=None A 1-dimensional array to select the columns of interest. 
If , all columns are selected.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_indexing.py", + "ast_data": "FunctionDef name:_safe_assign arg:X arg:values arguments arg arg arg arg Assign Compare Call Assign Compare Call If Call With Call Call Assign Assign" + }, + { + "library": "pytorch", + "name": "_check_tensor_all", + "source_code": "def _check_tensor_all(cond, message=None):\n _check_tensor_all_with(RuntimeError, cond, message)", + "docstring": "Throws error containing an optional message if the specified condition is False. Error type: `torch.Tensor`", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:_check_tensor_all arg:cond arg:message arguments arg arg Call" + }, + { + "library": "pandas", + "name": "render_pep440", + "source_code": "def render_pep440(pieces):\n if pieces['closest-tag']:\n rendered = pieces['closest-tag']\n if pieces['distance'] or pieces['dirty']:\n rendered += plus_or_dot(pieces)\n rendered += f'{pieces['distance']}.g{pieces['short']}'\n if pieces['dirty']:\n rendered += '.dirty'\n else:\n rendered = f'0+untagged.{pieces['distance']}.g{pieces['short']}'\n if pieces['dirty']:\n rendered += '.dirty'\n return rendered", + "docstring": "Build up version string, with post-release \"local version identifier\". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", + "type": "function", + "file_path": "pandas\\pandas\\_version.py", + "ast_data": "FunctionDef name:render_pep440 arg:pieces arguments arg If Assign If BoolOp Call If Assign If Return return:yes" + }, + { + "library": "django", + "name": "get_formats", + "source_code": "def get_formats():\n FORMAT_SETTINGS = ('DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT', 'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT', 'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR', 'THOUSAND_SEPARATOR', 'NUMBER_GROUPING', 'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS')\n return {attr: get_format(attr) for attr in FORMAT_SETTINGS}", + "docstring": "Return all formats strings required for i18n to work.", + "type": "function", + "file_path": "django\\django\\views\\i18n.py", + "ast_data": "FunctionDef name:get_formats arguments Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "GlorotUniform", + "source_code": "@tf_export(v1=['glorot_uniform_initializer', 'initializers.glorot_uniform'])\n@deprecation.deprecated_endpoints('glorot_uniform_initializer', 'initializers.glorot_uniform')\nclass GlorotUniform(VarianceScaling):\n\n @deprecated_args(None, 'Call initializer instance with the dtype argument instead of passing it to the constructor', 'dtype')\n def __init__(self, seed=None, dtype=dtypes.float32):\n super(GlorotUniform, self).__init__(scale=1.0, mode='fan_avg', distribution='uniform', seed=seed)\n\n def get_config(self):\n return {'seed': self.seed, 'dtype': self.dtype.name}", + "docstring": "The Glorot uniform initializer, also called Xavier uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where is where is the number of input units in the weight tensor and is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See for behavior. dtype: Default data type, used if no argument is provided when calling the initializer. 
Only floating point types are supported. References: [Glorot et al., 2010]( ([pdf](", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "ClassDef name:GlorotUniform FunctionDef name:__init__ arg:self arg:seed arg:dtype arguments arg arg arg Call Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "__add__", + "source_code": "def __add__(self, rhs):\n if isinstance(rhs, str):\n t = super().__add__(rhs)\n if isinstance(rhs, SafeData):\n t = SafeString(t)\n return t\n return NotImplemented", + "docstring": "Concatenating a safe string with another safe bytestring or safe string is safe. Otherwise, the result is no longer safe.", + "type": "method", + "file_path": "django\\django\\utils\\safestring.py", + "ast_data": "FunctionDef name:__add__ arg:self arg:rhs arguments arg arg If Call Assign Call Call If Call Assign Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n if self.weights == 'uniform':\n neigh_ind = self.kneighbors(X, return_distance=False)\n neigh_dist = None\n else:\n neigh_dist, neigh_ind = self.kneighbors(X)\n weights = _get_weights(neigh_dist, self.weights)\n _y = self._y\n if _y.ndim == 1:\n _y = _y.reshape((-1, 1))\n if weights is None:\n y_pred = np.mean(_y[neigh_ind], axis=1)\n else:\n y_pred = np.empty((neigh_dist.shape[0], _y.shape[1]), dtype=np.float64)\n denom = np.sum(weights, axis=1)\n for j in range(_y.shape[1]):\n num = np.sum(_y[neigh_ind, j] * weights, axis=1)\n y_pred[:, j] = num / denom\n if self._y.ndim == 1:\n y_pred = y_pred.ravel()\n return y_pred", + "docstring": "Predict the target for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If , predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=int Target values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_regression.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg If Compare Assign Call Assign Assign Call Assign Call Assign If Compare Assign Call If Compare Assign Call Assign Call Assign Call For Call Assign Call Assign If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "range_input_producer", + "source_code": "@tf_export(v1=['train.range_input_producer'])\n@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.range(limit).shuffle(limit).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.')\ndef range_input_producer(limit, num_epochs=None, shuffle=True, seed=None, capacity=32, shared_name=None, name=None):\n with ops.name_scope(name, 'input_producer', [limit]) as name:\n range_tensor = math_ops.range(limit)\n return input_producer(range_tensor, [], num_epochs, shuffle, seed, capacity, shared_name, 'fraction_of_%d_full' % capacity, name)", + "docstring": "Produces the integers from 0 to limit-1 in a queue. Note: if is not , this function creates local counter . Use to initialize local variables. Args: limit: An int32 scalar tensor. num_epochs: An integer (optional). If specified, produces each integer times before generating an OutOfRange error. 
If not specified, can cycle through the integers an unlimited number of times. shuffle: Boolean. If true, the integers are randomly shuffled within each epoch. seed: An integer (optional). Seed used if shuffle == True. capacity: An integer. Sets the queue capacity. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. name: A name for the operations (optional). Returns: A Queue with the output integers. A for the Queue is added to the current 's collection. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the API to ingest data under eager execution. @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:range_input_producer arg:limit arg:num_epochs arg:shuffle arg:seed arg:capacity arg:shared_name arg:name arguments arg arg arg arg arg arg arg With Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_TensorIterator", + "source_code": "class _TensorIterator(object):\n __slots__ = ['_tensor', '_index', '_limit']\n\n def __init__(self, tensor, dim0):\n self._tensor = tensor\n self._index = 0\n self._limit = dim0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self._index == self._limit:\n raise StopIteration\n result = self._tensor[self._index]\n self._index += 1\n return result\n next = __next__", + "docstring": "Iterates over the leading dim of a Tensor. Performs no error checks.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "ClassDef name:_TensorIterator Assign FunctionDef name:__init__ arg:self arg:tensor arg:dim0 arguments arg arg arg Assign Assign Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg If Compare Raise Assign Return return:yes Assign" + }, + { + "library": "pytorch", + "name": "graph_copy", + "source_code": "@compatibility(is_backward_compatible=True)\ndef graph_copy(self, g: 'Graph', val_map: dict[Node, Node], return_output_node=False) -> 'Optional[Argument]':\n for node in g.nodes:\n if node in val_map:\n continue\n if node.op == 'output':\n rv = map_arg(node.args[0], lambda n: val_map[n])\n return rv if not return_output_node else (rv, node)\n val_map[node] = self.node_copy(node, lambda n: val_map[n])\n return None", + "docstring": "Copy all nodes from a given graph into `` otherwise.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\graph.py", + "ast_data": "FunctionDef name:graph_copy arg:self arg:g arg:val_map arg:return_output_node arguments arg arg arg arg For If Compare If Compare Assign Call arguments arg Return return:yes Assign Call arguments arg Return return:no Call" + }, + { + "library": "tensorflow", + "name": "_save", + "source_code": "def _save(input_dataset, path, compression=None, shard_func=None, checkpoint_args=None):\n if context.executing_eagerly() and checkpoint_args:\n save_dataset = _SaveDataset(input_dataset, path, shard_func, compression)\n save_iterator = iter(save_dataset)\n if 'checkpoint' in checkpoint_args:\n raise ValueError(\"'Invalid `checkpoint_args`. 
`checkpoint_args` are not allowed to include 'checkpoint'.\")\n checkpoint = checkpoint_lib.Checkpoint(iterator=save_iterator)\n checkpoint_args['checkpoint'] = checkpoint\n manager = checkpoint_management.CheckpointManager(**checkpoint_args)\n checkpoint.restore(manager.latest_checkpoint)\n for _ in enumerate(save_iterator):\n if 'step_counter' in checkpoint_args:\n checkpoint_args['step_counter'].assign_add(delta=1)\n manager.save(check_interval=True)\n else:\n dataset, shard_func, use_shard_func, path = set_save_dataset_attributes(input_dataset, shard_func, path)\n return ged_ops.save_dataset(dataset._variant_tensor, path=path, shard_func_other_args=shard_func.captured_inputs, compression=compression, shard_func=shard_func, use_shard_func=use_shard_func)", + "docstring": "Implements the save function and checkpoint functionality.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\save_op.py", + "ast_data": "FunctionDef name:_save arg:input_dataset arg:path arg:compression arg:shard_func arg:checkpoint_args arguments arg arg arg arg arg If BoolOp Call Assign Call Assign Call If Compare Raise Call Assign Call Assign Assign Call Call For Call If Compare Call Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_context_data", + "source_code": "def get_context_data(self, *, object_list=None, **kwargs):\n queryset = object_list if object_list is not None else self.object_list\n page_size = self.get_paginate_by(queryset)\n context_object_name = self.get_context_object_name(queryset)\n if page_size:\n paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)\n context = {'paginator': paginator, 'page_obj': page, 'is_paginated': is_paginated, 'object_list': queryset}\n else:\n context = {'paginator': None, 'page_obj': None, 'is_paginated': False, 'object_list': queryset}\n if context_object_name is not None:\n context[context_object_name] = queryset\n context.update(kwargs)\n return super().get_context_data(**context)", + "docstring": "Get the context for this view.", + "type": "method", + "file_path": "django\\django\\views\\generic\\list.py", + "ast_data": "FunctionDef name:get_context_data arg:self arguments arg arg arg Assign Compare Assign Call Assign Call If Assign Call Assign Assign If Compare Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_setup_countdown_if_has_grace_period_and_not_already_counting_down", + "source_code": "def _setup_countdown_if_has_grace_period_and_not_already_counting_down(self):\n if self._grace_period > 0 and (not self._final_checkpoint_countdown):\n buffer_factor = 3\n self._target_time_for_termination = self._received_own_sigterm_time + self._grace_period - buffer_factor * self._estimated_run_time * 2", + "docstring": "Set up at the beginning of a countdown period for long grace period.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py", + "ast_data": "FunctionDef name:_setup_countdown_if_has_grace_period_and_not_already_counting_down arg:self arguments arg If BoolOp Compare Assign Assign" + }, + { + "library": "pytorch", + "name": "_retrieve_recv_grads", + "source_code": "def _retrieve_recv_grads(self, bwd_chunk_id: int):\n recv_infos = self.grad_recv_info[bwd_chunk_id]\n grads = self._map_tensor_from_recv_info(recv_infos)\n return grads", + "docstring": "Retrieve the gradients received for the current stage during backward.", + "type": "method", + 
"file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", + "ast_data": "FunctionDef name:_retrieve_recv_grads arg:self arg:bwd_chunk_id arguments arg arg Assign Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "min", + "source_code": "@_period_dispatch\ndef min(self, *, axis: AxisInt | None=None, skipna: bool=True, **kwargs):\n nv.validate_min((), kwargs)\n nv.validate_minmax_axis(axis, self.ndim)\n result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)\n return self._wrap_reduction_result(axis, result)", + "docstring": "Return the minimum value of the Array or minimum along an axis. See Also -------- numpy.ndarray.min Index.min : Return the minimum value in an Index. Series.min : Return the minimum value in a Series.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:min arg:self arguments arg arg arg arg Call Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "BoundWidget", + "source_code": "@html_safe\nclass BoundWidget:\n\n def __init__(self, parent_widget, data, renderer):\n self.parent_widget = parent_widget\n self.data = data\n self.renderer = renderer\n\n def __str__(self):\n return self.tag(wrap_label=True)\n\n def tag(self, wrap_label=False):\n context = {'widget': {**self.data, 'wrap_label': wrap_label}}\n return self.parent_widget._render(self.template_name, context, self.renderer)\n\n @property\n def template_name(self):\n if 'template_name' in self.data:\n return self.data['template_name']\n return self.parent_widget.template_name\n\n @property\n def id_for_label(self):\n return self.data['attrs'].get('id')\n\n @property\n def choice_label(self):\n return self.data['label']", + "docstring": "A container class used for iterating over widgets. This is useful for widgets that have choices. For example, the following can be used in a template: {% for radio in myform.beatles %} {{ radio.choice_label }} {{ radio.tag }} {% endfor %}", + "type": "class", + "file_path": "django\\django\\forms\\boundfield.py", + "ast_data": "ClassDef name:BoundWidget FunctionDef name:__init__ arg:self arg:parent_widget arg:data arg:renderer arguments arg arg arg arg Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:tag arg:self arg:wrap_label arguments arg arg Assign Return return:yes Call FunctionDef name:template_name arg:self arguments arg If Compare Return return:yes Return return:yes FunctionDef name:id_for_label arg:self arguments arg Return return:yes Call FunctionDef name:choice_label arg:self arguments arg Return return:yes" + }, + { + "library": "cherrypy", + "name": "decode", + "source_code": "def decode(encoding=None, default_encoding='utf-8'):\n body = cherrypy.request.body\n if encoding is not None:\n if not isinstance(encoding, list):\n encoding = [encoding]\n body.attempt_charsets = encoding\n elif default_encoding:\n if not isinstance(default_encoding, list):\n default_encoding = [default_encoding]\n body.attempt_charsets = body.attempt_charsets + default_encoding", + "docstring": "Replace or extend the list of charsets used to decode a request entity. Either argument may be a single string or a list of strings. encoding If not None, restricts the set of charsets attempted while decoding a request entity to the given set (even if a different charset is given in the Content-Type request header). default_encoding Only in effect if the 'encoding' argument is not given. 
If given, the set of charsets attempted while decoding a request entity is *extended* with the given value(s).", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\encoding.py", + "ast_data": "FunctionDef name:decode arg:encoding arg:default_encoding arguments arg arg Assign If Compare If Call Assign Assign If If Call Assign Assign" + }, + { + "library": "pytorch", + "name": "synchronize", + "source_code": "def synchronize(store, data: bytes, rank: int, world_size: int, key_prefix: str, timeout: float=300) -> list[bytes]:\n with store_timeout(store, timeout):\n store.set(f'{key_prefix}{rank}', data)\n agent_data = get_all(store, rank, key_prefix, world_size)\n return agent_data", + "docstring": "Synchronizes `` will be available on each of the agents. Note: The data on the path is not deleted, as a result there can be stale data if you use the same key_prefix twice. Time complexity: O(N) per worker, O(N^2) globally.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\store.py", + "ast_data": "FunctionDef name:synchronize arg:store arg:data arg:rank arg:world_size arg:key_prefix arg:timeout arguments arg arg arg arg arg arg With Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "PerWorkerDistributedIterator", + "source_code": "class PerWorkerDistributedIterator(PerWorkerValues):\n\n def __next__(self):\n return self.get_next()\n\n def get_next(self, name=None):\n raise NotImplementedError('Iterating over an `AsyncDistributedIterator` is not supported right now.')", + "docstring": "Distributed iterator for .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py", + "ast_data": "ClassDef name:PerWorkerDistributedIterator FunctionDef name:__next__ arg:self arguments arg Return return:yes Call FunctionDef name:get_next arg:self arg:name arguments arg arg Raise Call" + }, + { + "library": "pandas", + "name": "get_op_result_name", + "source_code": "def get_op_result_name(left, right):\n if isinstance(right, (ABCSeries, ABCIndex)):\n name = _maybe_match_name(left, right)\n else:\n name = left.name\n return name", + "docstring": "Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string", + "type": "function", + "file_path": "pandas\\pandas\\core\\ops\\common.py", + "ast_data": "FunctionDef name:get_op_result_name arg:left arg:right arguments arg arg If Call Assign Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_update", + "source_code": "def _update(self, level: int) -> None:\n sl = level + 1\n stack = inspect.stack()\n try:\n self._get_vars(stack[:sl], scopes=['locals'])\n finally:\n del stack[:], stack", + "docstring": "Update the current scope by going back levels. 
Parameters ---------- level : int", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\scope.py", + "ast_data": "FunctionDef name:_update arg:self arg:level arguments arg arg Assign Assign Call Try Call" + }, + { + "library": "scikit-learn", + "name": "isotonic_regression", + "source_code": "@validate_params({'y': ['array-like'], 'sample_weight': ['array-like', None], 'y_min': [Interval(Real, None, None, closed='both'), None], 'y_max': [Interval(Real, None, None, closed='both'), None], 'increasing': ['boolean']}, prefer_skip_nested_validation=True)\ndef isotonic_regression(y, *, sample_weight=None, y_min=None, y_max=None, increasing=True):\n y = check_array(y, ensure_2d=False, input_name='y', dtype=[np.float64, np.float32])\n if sp_base_version >= parse_version('1.12.0'):\n res = optimize.isotonic_regression(y=y, weights=sample_weight, increasing=increasing)\n y = np.asarray(res.x, dtype=y.dtype)\n else:\n order = np.s_[:] if increasing else np.s_[::-1]\n y = np.array(y[order], dtype=y.dtype)\n sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True)\n sample_weight = np.ascontiguousarray(sample_weight[order])\n _inplace_contiguous_isotonic_regression(y, sample_weight)\n y = y[order]\n if y_min is not None or y_max is not None:\n if y_min is None:\n y_min = -np.inf\n if y_max is None:\n y_max = np.inf\n np.clip(y, y_min, y_max, y)\n return y", + "docstring": "Solve the isotonic regression model. Read more in the :ref:. Parameters ---------- y : array-like of shape (n_samples,) The data. sample_weight : array-like of shape (n_samples,), default=None Weights on each point of the regression. If None, weight is set to 1 (equal weights). y_min : float, default=None Lower bound on the lowest predicted value (the minimum value may still be higher). If not set, defaults to -inf. y_max : float, default=None Upper bound on the highest predicted value (the maximum may still be lower). If not set, defaults to +inf. increasing : bool, default=True Whether to compute `` is increasing (if set to True) or decreasing (if set to False). Returns ------- y_ : ndarray of shape (n_samples,) Isotonic fit of y. References ---------- \"Active set algorithms for isotonic regression; A unifying framework\" by Michael J. Best and Nilotpal Chakravarti, section 3. 
Examples -------- >>> from sklearn.isotonic import isotonic_regression >>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4]) array([2.75 , 2.75 , 2.75 , 2.75 , 7.33, 7.33, 7.33, 7.33, 7.33, 7.33])", + "type": "function", + "file_path": "scikit-learn\\sklearn\\isotonic.py", + "ast_data": "FunctionDef name:isotonic_regression arg:y arguments arg arg arg arg arg Assign Call If Compare Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Call Assign If BoolOp Compare Compare If Compare Assign If Compare Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_RemoveExternalControlEdges", + "source_code": "def _RemoveExternalControlEdges(self, op: ops.Operation):\n internal_control_inputs = []\n external_control_inputs = []\n for x in op.control_inputs:\n is_internal_op = False\n ctxt = x._get_control_flow_context()\n while ctxt is not None:\n if ctxt == self:\n is_internal_op = True\n break\n ctxt = ctxt._outer_context\n if is_internal_op:\n internal_control_inputs.append(x)\n else:\n external_control_inputs.append(x)\n op._remove_all_control_inputs()\n op._add_control_inputs(internal_control_inputs)\n return (internal_control_inputs, external_control_inputs)", + "docstring": "Remove any external control dependency on this op.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py", + "ast_data": "FunctionDef name:_RemoveExternalControlEdges arg:self arg:op arguments arg arg Assign Assign For Assign Assign Call While Compare If Compare Assign Assign If Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "request_watch", + "source_code": "def request_watch(self, node_name, output_slot, debug_op, breakpoint=False):\n self._debug_ops_state_change_queue.put(_state_change(debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE if breakpoint else debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY, node_name, output_slot, debug_op))", + "docstring": "Request enabling a debug tensor watchpoint or breakpoint. This will let the server send a EventReply to the client side (i.e., the debugged TensorFlow runtime process) to request adding a watch key (i.e., ::) to the list of enabled watch keys. The list applies only to debug ops with the attribute gated_grpc=True. To disable the watch, use . Args: node_name: () name of the node that the to-be-watched tensor belongs to, e.g., \"hidden/Weights\". output_slot: () output slot index of the tensor to watch. debug_op: () name of the debug op to enable. This should not include any attribute substrings. breakpoint: () Iff , the debug op will block and wait until it receives an response from the server. 
The proto may carry a TensorProto that modifies the value of the debug op's output tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py", + "ast_data": "FunctionDef name:request_watch arg:self arg:node_name arg:output_slot arg:debug_op arg:breakpoint arguments arg arg arg arg arg Call Call" + }, + { + "library": "scikit-learn", + "name": "inverse_transform", + "source_code": "def inverse_transform(self, Y, threshold=None):\n check_is_fitted(self)\n if threshold is None:\n threshold = (self.pos_label + self.neg_label) / 2.0\n if self.y_type_ == 'multiclass':\n y_inv = _inverse_binarize_multiclass(Y, self.classes_)\n else:\n y_inv = _inverse_binarize_thresholding(Y, self.y_type_, self.classes_, threshold)\n if self.sparse_input_:\n y_inv = sp.csr_matrix(y_inv)\n elif sp.issparse(y_inv):\n y_inv = y_inv.toarray()\n return y_inv", + "docstring": "Transform binary labels back to multi-class labels. Parameters ---------- Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) Target values. All sparse matrices are converted to CSR before inverse transformation. threshold : float, default=None Threshold used in the binary and multi-label cases. Use 0 when `decision_functionpredict_probainverse_transformdecision_functioninverse_transform`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py", + "ast_data": "FunctionDef name:inverse_transform arg:self arg:Y arg:threshold arguments arg arg arg Call If Compare Assign If Compare Assign Call Assign Call If Assign Call If Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "set", + "source_code": "def set(self, value: Any, priority: int) -> None:\n if priority >= self.priority:\n if isinstance(self.value, BaseSettings):\n value = BaseSettings(value, priority=priority)\n self.value = value\n self.priority = priority", + "docstring": "Sets value if priority is higher or equal than current priority.", + "type": "method", + "file_path": "scrapy\\scrapy\\settings\\__init__.py", + "ast_data": "FunctionDef name:set arg:self arg:value arg:priority arguments arg arg arg If Compare If Call Assign Call Assign Assign" + }, + { + "library": "kornia", + "name": "AutoAugment", + "source_code": "class AutoAugment(PolicyAugmentBase):\n\n def __init__(self, policy: Union[str, List[SUBPOLICY_CONFIG]]='imagenet', transformation_matrix_mode: str='silent') -> None:\n if policy == 'imagenet':\n _policy = imagenet_policy\n elif policy == 'cifar10':\n _policy = cifar10_policy\n elif policy == 'svhn':\n _policy = svhn_policy\n elif isinstance(policy, (list, tuple)):\n _policy = policy\n else:\n raise NotImplementedError(f'Invalid policy `{policy}`.')\n super().__init__(_policy, transformation_matrix_mode=transformation_matrix_mode)\n selection_weights = tensor([1.0 / len(self)] * len(self))\n self.rand_selector = Categorical(selection_weights)\n\n def compose_subpolicy_sequential(self, subpolicy: SUBPOLICY_CONFIG) -> PolicySequential:\n return PolicySequential(*[getattr(ops, name)(prob, mag) for name, prob, mag in subpolicy])\n\n def get_forward_sequence(self, params: Optional[List[ParamItem]]=None) -> Iterator[Tuple[str, Module]]:\n if params is None:\n idx = self.rand_selector.sample((1,))\n return self.get_children_by_indices(idx)\n return self.get_children_by_params(params)", + "docstring": "Apply AutoAugment :cite: searched strategies. Args: policy: a customized policy config or presets of \"imagenet\", \"cifar10\", and \"svhn\". 
transformation_matrix_mode: computation mode for the chained transformation matrix, via attribute. If , transformation matrix will be computed silently and the non-rigid modules will be ignored as identity transformations. If , transformation matrix will be computed silently and the non-rigid modules will trigger errors. If , transformation matrix will be totally ignored. Examples: >>> import torch >>> import kornia.augmentation as K >>> in_tensor = torch.rand(5, 3, 30, 30) >>> aug = K.AugmentationSequential(AutoAugment()) >>> aug(in_tensor).shape torch.Size([5, 3, 30, 30])", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\autoaugment.py", + "ast_data": "ClassDef name:AutoAugment FunctionDef name:__init__ arg:self arg:policy arg:transformation_matrix_mode arguments arg arg arg If Compare Assign If Compare Assign If Compare Assign If Call Assign Raise Call Call Call Assign Call Call Call Assign Call FunctionDef name:compose_subpolicy_sequential arg:self arg:subpolicy arguments arg arg Return return:yes Call Call Call FunctionDef name:get_forward_sequence arg:self arg:params arguments arg arg If Compare Assign Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "OperatorSupport", + "source_code": "@compatibility(is_backward_compatible=False)\nclass OperatorSupport(OperatorSupportBase):\n _support_dict: SupportDict\n\n def __init__(self, support_dict: t.Optional[SupportDict]=None):\n self._support_dict = support_dict or {}\n\n def is_node_supported(self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n if node.op not in CALLABLE_NODE_OPS:\n return True\n target = get_node_target(submodules, node)\n if target not in self._support_dict:\n return False\n if self._support_dict[target] is None:\n return True\n args_dtypes, kwargs_dtypes = self._support_dict[target]\n for i, dtypes in enumerate(args_dtypes):\n if len(node.args) <= i:\n break\n if dtypes is None:\n continue\n if not isinstance(node.args[i], torch.fx.Node):\n continue\n arg_dtype = _get_arg_dtype(node.args[i])\n if arg_dtype not in dtypes:\n return False\n for k, dtypes in kwargs_dtypes.items():\n if k not in node.kwargs:\n continue\n if not isinstance(node.kwargs[k], torch.fx.Node):\n continue\n kwarg_dtype = _get_arg_dtype(node.kwargs[k])\n if kwarg_dtype not in dtypes:\n return False\n return True", + "docstring": "maps node.target typename to supported inputs dtypes. node.target typename is retrieved using helper function If supported inputs dtypes is None, it means any dtype is supported, else we should see a tuple like (([dtypes], ...), {\"name\":[dtypes], ...}). The first tuple ([dtypes], ...) indicates what dtypes are supported for inputs in node.args and the second dict {\"name\": [dtypes], ...} indicates what dtypes are supported for inputs in node.kwargs. For inputs in args, if we don't want to check it, we can put None there, e.g. (None, [torch.float]) indicates that we don't care about the type of the first input in args. 
And for inputs in kwargs, if not listed, will not be checked.", + "type": "class", + "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py", + "ast_data": "ClassDef name:OperatorSupport FunctionDef name:__init__ arg:self arg:support_dict arguments arg arg Assign BoolOp FunctionDef name:is_node_supported arg:self arg:submodules arg:node arguments arg arg arg If Compare Return return:yes Assign Call If Compare Return return:yes If Compare Return return:yes Assign For Call If Compare Call If Compare If Call Assign Call If Compare Return return:yes For Call If Compare If Call Assign Call If Compare Return return:yes Return return:yes Call" + }, + { + "library": "authlib", + "name": "create_authorization_url", + "source_code": "async def create_authorization_url(self, redirect_uri=None, **kwargs):\n if not self.authorize_url:\n raise RuntimeError('Missing \"authorize_url\" value')\n if self.authorize_params:\n kwargs.update(self.authorize_params)\n async with self._get_oauth_client() as client:\n client.redirect_uri = redirect_uri\n params = {}\n if self.request_token_params:\n params.update(self.request_token_params)\n request_token = await client.fetch_request_token(self.request_token_url, **params)\n log.debug(f'Fetch request token: {request_token!r}')\n url = client.create_authorization_url(self.authorize_url, **kwargs)\n state = request_token['oauth_token']\n return {'url': url, 'request_token': request_token, 'state': state}", + "docstring": "Generate the authorization url and state for HTTP redirect. :param redirect_uri: Callback or redirect URI for authorization. :param kwargs: Extra parameters to include. :return: dict", + "type": "method", + "file_path": "authlib\\authlib\\integrations\\base_client\\async_app.py", + "ast_data": "AsyncFunctionDef name:create_authorization_url arg:self arg:redirect_uri arguments arg arg arg If Raise Call If Call Call Assign Assign If Call Assign Call Call Assign Call Assign Return return:yes" + }, + { + "library": "scrapy", + "name": "set_environ", + "source_code": "@contextmanager\ndef set_environ(**kwargs: str) -> Iterator[None]:\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v", + "docstring": "Temporarily set environment variables inside the context manager and fully restore previous environment afterwards", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\misc.py", + "ast_data": "FunctionDef name:set_environ arguments arg Assign Call Call Try For Call If Compare Assign" + }, + { + "library": "sphinx", + "name": "PyClassMethod", + "source_code": "class PyClassMethod(PyMethod):\n option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()\n\n def run(self) -> list[Node]:\n self.name = 'py:method'\n self.options['classmethod'] = True\n return super().run()", + "docstring": "Description of a classmethod.", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py", + "ast_data": "ClassDef name:PyClassMethod Call FunctionDef name:run arg:self arguments arg Assign Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "Operand", + "source_code": "class Operand(NamedTuple):\n op_type: int\n shape: tuple[int, ...]\n dim_order: DimOrder\n scale: float\n zero_point: int\n\n def use_nchw(self):\n if self.dim_order is DimOrder.PRESUMED_CONTIGUOUS:\n return True\n if self.dim_order is DimOrder.CHANNELS_LAST:\n return False\n raise 
Exception('Unknown dim order')", + "docstring": "Represenation of an NNAPI operand.", + "type": "class", + "file_path": "pytorch\\torch\\backends\\_nnapi\\serializer.py", + "ast_data": "ClassDef name:Operand FunctionDef name:use_nchw arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "fuse_convtranspose_bn", + "source_code": "def fuse_convtranspose_bn(is_qat, convt, bn):\n assert convt.training == bn.training, 'ConvTranspose and BN both must be in the same mode (train or eval).'\n if is_qat:\n raise Exception('Fusing ConvTranspose+BatchNorm not yet supported in QAT.')\n else:\n return nn.utils.fusion.fuse_conv_bn_eval(convt, bn, transpose=True)", + "docstring": "Return the fused ConvTranspose and bn modules. Given ConvTranspose and bn modules, fuses them and returns the fused module Args: convt: Module instance of type ConvTransposeNd bn: BatchNormNd instance that needs to be fused with the linear layer. batch norm N should match the ConvTranspose N Examples:: >>> m1 = nn.ConvTranspose2d(10, 20, 3) >>> b1 = nn.BatchNorm2d(20) >>> # xdoctest: +SKIP >>> m2 = fuse_convtranspose_bn(m1, b1)", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fuser_method_mappings.py", + "ast_data": "FunctionDef name:fuse_convtranspose_bn arg:is_qat arg:convt arg:bn arguments arg arg arg Compare If Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "save_counter", + "source_code": "@property\ndef save_counter(self):\n self._maybe_create_save_counter()\n return self._save_counter", + "docstring": "An integer variable which starts at zero and is incremented on save. Used to number checkpoints. Returns: The save counter variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:save_counter arg:self arguments arg Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_initiate_registry_from_torchlib", + "source_code": "def _initiate_registry_from_torchlib(self) -> None:\n for meta in onnxscript_apis.get_torchlib_ops():\n internal_name_instance = registration.OpName.from_qualified_name(meta.qualified_name)\n symbolic_function = registration.ONNXFunction(onnx_function=meta.function, op_full_name=internal_name_instance.qualified_name(), is_custom=False, is_complex=meta.is_complex)\n self._register(internal_name_instance, symbolic_function)", + "docstring": "Populates the registry with ATen functions from torchlib. 
Args: torchlib_registry: The torchlib registry to use for populating the registry.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py", + "ast_data": "FunctionDef name:_initiate_registry_from_torchlib arg:self arguments arg For Call Assign Call Assign Call Call Call" + }, + { + "library": "matplotlib", + "name": "minorformatter", + "source_code": "@property\ndef minorformatter(self):\n return self.long_axis.get_minor_formatter()", + "docstring": "Minor tick for the colorbar.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", + "ast_data": "FunctionDef name:minorformatter arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "forward", + "source_code": "def forward(self, input: Tensor, offsets: Optional[Tensor]=None, per_sample_weights: Optional[Tensor]=None) -> Tensor:\n return F.embedding_bag(input, self.weight, offsets, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse, per_sample_weights, self.include_last_offset, self.padding_idx)", + "docstring": "Forward pass of EmbeddingBag. Args: input (Tensor): Tensor containing bags of indices into the embedding matrix. offsets (Tensor, optional): Only used when :attr: is 1D. :attr: determines the starting index position of each bag (sequence) in :attr:. per_sample_weights (Tensor, optional): a tensor of float / double weights, or None to indicate all weights should be taken to be `per_sample_weightsoffsets(B, embedding_dim)inputoffsetsinput(B, N)modeoffsetsinput(N)offsetsinputoffsets(B)input` bags. Empty bags (i.e., having 0-length) will have returned vectors filled by zeros.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\sparse.py", + "ast_data": "FunctionDef name:forward arg:self arg:input arg:offsets arg:per_sample_weights arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "run_ui", + "source_code": "def run_ui(self, init_command=None, title=None, title_color=None, enable_mouse_on_start=True):\n print(title)\n if init_command is not None:\n self._dispatch_command(init_command)\n exit_token = self._ui_loop()\n if self._on_ui_exit:\n self._on_ui_exit()\n return exit_token", + "docstring": "Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\readline_ui.py", + "ast_data": "FunctionDef name:run_ui arg:self arg:init_command arg:title arg:title_color arg:enable_mouse_on_start arguments arg arg arg arg arg Call If Compare Call Assign Call If Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_assign_modules_buffers", + "source_code": "def _assign_modules_buffers(self):\n named_module_buffers = [(buffer, buffer_name) for buffer_name, buffer in self.module.named_buffers() if buffer_name not in self.parameters_to_ignore]\n self.modules_buffers = [buffer for buffer, buffer_name in named_module_buffers]\n self.named_module_buffers = {buffer_name: buffer for buffer, buffer_name in named_module_buffers}", + "docstring": "Assign self.module.named_buffers to self.modules_buffers. Assigns module buffers to self.modules_buffers which are then used to broadcast across ranks when broadcast_buffers=True. 
Note that this must be called every time buffers need to be synced because buffers can be reassigned by user module, see", + "type": "method", + "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py", + "ast_data": "FunctionDef name:_assign_modules_buffers arg:self arguments arg Assign Call Compare Assign Assign" + }, + { + "library": "tensorflow", + "name": "_add_asset_to_metagraph", + "source_code": "def _add_asset_to_metagraph(meta_graph_def, asset_filename, asset_tensor):\n asset_proto = meta_graph_def.asset_file_def.add()\n asset_proto.filename = asset_filename\n asset_proto.tensor_info.name = asset_tensor.name", + "docstring": "Builds an asset proto and adds it to the meta graph def. Args: meta_graph_def: The meta graph def to which the asset will be added. asset_filename: The filename of the asset to be added. asset_tensor: The asset tensor used to populate the tensor info of the asset proto.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py", + "ast_data": "FunctionDef name:_add_asset_to_metagraph arg:meta_graph_def arg:asset_filename arg:asset_tensor arguments arg arg arg Assign Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "restore_saveables", + "source_code": "def restore_saveables(self, tensor_saveables, python_positions, registered_savers=None, reader=None):\n if reader is None:\n reader = py_checkpoint_reader.NewCheckpointReader(self.save_path_string)\n restore_ops = []\n for position in python_positions:\n key = position.object_proto.attributes[0].checkpoint_key\n position.trackable.deserialize(reader.get_tensor(key))\n if tensor_saveables or registered_savers:\n flat_saveables = saveable_object_util.validate_and_slice_inputs(tensor_saveables)\n new_restore_ops = functional_saver.MultiDeviceSaver.from_saveables(flat_saveables, registered_savers).restore(self.save_path_tensor, self.options)\n if not context.executing_eagerly():\n for name, restore_op in sorted(new_restore_ops.items()):\n restore_ops.append(restore_op)\n assert name not in self.restore_ops_by_name\n self.restore_ops_by_name[name] = restore_op\n return restore_ops", + "docstring": "Run or build restore operations for SaveableObjects. Args: tensor_saveables: s which correspond to Tensors. python_positions: List of CheckpointPositions bound to objects which must be restored eagerly. registered_savers: a dict mapping saver names-> object name -> Trackable. reader: A . If None, a new instance will be created. 
Returns: When graph building, a list of restore operations, either cached or newly created, to restore .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:restore_saveables arg:self arg:tensor_saveables arg:python_positions arg:registered_savers arg:reader arguments arg arg arg arg arg If Compare Assign Call Assign For Assign Call Call If BoolOp Assign Call Assign Call Call If Call For Call Call Call Compare Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "to_device", + "source_code": "def to_device(x: Array, device: Device, /, *, stream: int | Any | None=None) -> Array:\n if is_numpy_array(x):\n if stream is not None:\n raise ValueError('The stream argument to to_device() is not supported')\n if device == 'cpu':\n return x\n raise ValueError(f'Unsupported device {device!r}')\n elif is_cupy_array(x):\n return _cupy_to_device(x, device, stream=stream)\n elif is_torch_array(x):\n return _torch_to_device(x, device, stream=stream)\n elif is_dask_array(x):\n if stream is not None:\n raise ValueError('The stream argument to to_device() is not supported')\n if device == 'cpu':\n return x\n raise ValueError(f'Unsupported device {device!r}')\n elif is_jax_array(x):\n if not hasattr(x, '__array_namespace__'):\n import jax.experimental.array_api\n if not hasattr(x, 'to_device'):\n return x\n return x.to_device(device, stream=stream)\n elif is_pydata_sparse_array(x) and device == _device(x):\n return x\n return x.to_device(device, stream=stream)", + "docstring": "Copy the array from the device on which it currently resides to the specified `x.to_device(device, stream=stream)standard to_deviceDevice Support Device Stream x.to(device) ` argument is not supported in PyTorch). See Also -------- device : Hardware device the array data resides on.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", + "ast_data": "FunctionDef name:to_device arguments arg arg arg If Call If Compare Raise Call If Compare Return return:yes Raise Call If Call Return return:yes Call If Call Return return:yes Call If Call If Compare Raise Call If Compare Return return:yes Raise Call If Call If Call If Call Return return:yes Return return:yes Call If BoolOp Call Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "numpy", + "name": "flatten", + "source_code": "def flatten(self, order='C'):\n return N.ndarray.flatten(self, order=order)", + "docstring": "Return a flattened copy of the matrix. All elements of the matrix are placed into a single row. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional 'C' means to flatten in row-major (C-style) order. 'F' means to flatten in column-major (Fortran-style) order. 'A' means to flatten in column-major order if is Fortran *contiguous* in memory, row-major order otherwise. 'K' means to flatten in the order the elements occur in memory. The default is 'C'. Returns ------- y : matrix A copy of the matrix, flattened to a matrix where is the number of elements in the original matrix. See Also -------- ravel : Return a flattened array. flat : A 1-D flat iterator over the matrix. 
Examples -------- >>> m = np.matrix([[1,2], [3,4]]) >>> m.flatten() matrix([[1, 2, 3, 4]]) >>> m.flatten('F') matrix([[1, 3, 2, 4]])", + "type": "method", + "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py", + "ast_data": "FunctionDef name:flatten arg:self arg:order arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__str__", + "source_code": "def __str__(self):\n info = {'section': self._section, 'config': self.config, 'req_type': self._req_type, 'req': str(self.req), 'range': str(self.range), 'exclude': str(self.exclude), 'include': str(self.include), 'init': str(self._initialized)}\n req_str = '\\n >>> _Reqs Instance <<<\\n'\n req_str += 'Section: {section}\\n'\n req_str += 'Configuration name: {config}\\n'\n req_str += 'Requirement type: {req_type}\\n'\n req_str += 'Requirement: {req}\\n'\n req_str += 'Range: {range}\\n'\n req_str += 'Exclude: {exclude}\\n'\n req_str += 'Include: {include}\\n'\n req_str += 'Initialized: {init}\\n\\n'\n return req_str.format(**info)", + "docstring": "Prints a requirement and its components. Returns: String that has concatenated information about a requirement.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py", + "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Call Call Call Call Call Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "_kolmogn_p", + "source_code": "def _kolmogn_p(n, x):\n if np.isnan(n):\n return n\n if int(n) != n or n <= 0:\n return np.nan\n if x >= 1.0 or x <= 0:\n return 0\n t = n * x\n if t <= 1.0:\n if t <= 0.5:\n return 0.0\n if n <= 140:\n prd = np.prod(np.arange(1, n) * (1.0 / n) * (2 * t - 1))\n else:\n prd = np.exp(_log_nfactorial_div_n_pow_n(n) + (n - 1) * np.log(2 * t - 1))\n return prd * 2 * n ** 2\n if t >= n - 1:\n return 2 * (1.0 - x) ** (n - 1) * n\n if x >= 0.5:\n return 2 * scipy.stats.ksone.pdf(x, n)\n delta = x / 2.0 ** 16\n delta = min(delta, x - 1.0 / n)\n delta = min(delta, 0.5 - x)\n\n def _kk(_x):\n return kolmogn(n, _x)\n return _derivative(_kk, x, dx=delta, order=5)", + "docstring": "Computes the PDF for the two-sided Kolmogorov-Smirnov statistic. x must be of type float, n of type integer.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_ksstats.py", + "ast_data": "FunctionDef name:_kolmogn_p arg:n arg:x arguments arg arg If Call Return return:yes If BoolOp Compare Call Compare Return return:yes If BoolOp Compare Compare Return return:yes Assign If Compare If Compare Return return:yes If Compare Assign Call Call Assign Call Call Call Return return:yes If Compare Return return:yes If Compare Return return:yes Call Assign Assign Call Assign Call FunctionDef name:_kk arg:_x arguments arg Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "_check_exclude", + "source_code": "def _check_exclude(self, obj):\n if obj.exclude is None:\n return []\n elif not isinstance(obj.exclude, (list, tuple)):\n return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014')\n field_counts = collections.Counter(obj.exclude)\n if (duplicate_fields := [field for field, count in field_counts.items() if count > 1]):\n return [checks.Error(\"The value of 'exclude' contains duplicate field(s).\", hint='Remove duplicates of %s.' 
% ', '.join(map(repr, duplicate_fields)), obj=obj.__class__, id='admin.E015')]\n else:\n return []", + "docstring": "Check that exclude is a sequence without duplicates.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_exclude arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call Assign Call If Call Compare Return return:yes Call Call Call Return return:no" + }, + { + "library": "django", + "name": "merge_dicts", + "source_code": "@staticmethod\ndef merge_dicts(dicts):\n merged = {}\n for d in reversed(dicts):\n merged.update(d)\n return merged", + "docstring": "Merge dicts in reverse to preference the order of the original list. e.g., merge_dicts([a, b]) will preference the keys in 'a' over those in 'b'.", + "type": "method", + "file_path": "django\\django\\db\\models\\query_utils.py", + "ast_data": "FunctionDef name:merge_dicts arg:dicts arguments arg Assign For Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n if sparse.issparse(X):\n n_samples = X.shape[0]\n output = []\n for batch in gen_batches(n_samples, self.batch_size_, min_batch_size=self.n_components or 0):\n output.append(super().transform(X[batch].toarray()))\n return np.vstack(output)\n else:\n return super().transform(X)", + "docstring": "Apply dimensionality reduction to X. X is projected on the first principal components previously extracted from a training set, using minibatches of size batch_size if X is sparse. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data, where is the number of samples and is the number of features. Returns ------- X_new : ndarray of shape (n_samples, n_components) Projection of X in the first principal components. Examples -------- >>> import numpy as np >>> from sklearn.decomposition import IncrementalPCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], ... 
[1, 1], [2, 1], [3, 2]]) >>> ipca = IncrementalPCA(n_components=2, batch_size=3) >>> ipca.fit(X) IncrementalPCA(batch_size=3, n_components=2) >>> ipca.transform(X) # doctest: +SKIP", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_incremental_pca.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg If Call Assign Assign For Call BoolOp Call Call Call Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_LoopBodyCaptureWrapper", + "source_code": "def _LoopBodyCaptureWrapper(func):\n\n @function.Defun(*_GetInputDtypes(func), func_name='%s_Wrapper' % func.name)\n def Wrapper(*args):\n result = func(*args)\n extra_args = tuple(function.get_extra_args())\n if isinstance(result, ops.Operation):\n return extra_args\n elif not isinstance(result, (list, tuple)):\n return (result,) + extra_args\n else:\n return result + type(result)(extra_args)\n return Wrapper", + "docstring": "Returns a wrapper for that handles loop-carried captured inputs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py", + "ast_data": "FunctionDef name:_LoopBodyCaptureWrapper arg:func arguments arg FunctionDef name:Wrapper arguments arg Assign Call Assign Call Call If Call Return return:yes If Call Return return:yes Return return:yes Call Call Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "staticfile", + "source_code": "def staticfile(filename, root=None, match='', content_types=None, debug=False):\n request = cherrypy.serving.request\n if request.method not in ('GET', 'HEAD'):\n if debug:\n cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')\n return False\n if match and (not re.search(match, request.path_info)):\n if debug:\n cherrypy.log('request.path_info %r does not match pattern %r' % (request.path_info, match), 'TOOLS.STATICFILE')\n return False\n if not os.path.isabs(filename):\n if not root:\n msg = \"Static tool requires an absolute filename (got '%s').\" % (filename,)\n if debug:\n cherrypy.log(msg, 'TOOLS.STATICFILE')\n raise ValueError(msg)\n filename = os.path.join(root, filename)\n return _attempt(filename, content_types, debug=debug)", + "docstring": "Serve a static resource from the given (root +) filename. match If given, request.path_info will be searched for the given regular expression before attempting to serve static content. content_types If given, it should be a Python dictionary of {file-extension: content-type} pairs, where 'file-extension' is a string (e.g. \"gif\") and 'content-type' is the value to write out in the Content-Type response header (e.g. \"image/gif\").", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\static.py", + "ast_data": "FunctionDef name:staticfile arg:filename arg:root arg:match arg:content_types arg:debug arguments arg arg arg arg arg Assign If Compare If Call Return return:yes If BoolOp Call If Call Return return:yes If Call If Assign If Call Raise Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "parameters", + "source_code": "def parameters(self, recurse: bool=True) -> Iterator[Parameter]:\n for _name, param in self.named_parameters(recurse=recurse):\n yield param", + "docstring": "Return an iterator over module parameters. This is typically passed to an optimizer. Args: recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. 
Yields: Parameter: module parameter Example:: >>> # xdoctest: +SKIP(\"undefined vars\") >>> for param in model.parameters(): >>> print(type(param), param.size()) (20L,) (20L, 1L, 5L, 5L)", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:parameters arg:self arg:recurse arguments arg arg For Call" + }, + { + "library": "tensorflow", + "name": "variable_creator_scope", + "source_code": "@tf_export('variable_creator_scope', v1=[])\n@tf_contextlib.contextmanager\ndef variable_creator_scope(variable_creator):\n with ops.get_default_graph()._variable_creator_scope(variable_creator):\n yield", + "docstring": "Scope which defines a variable creation function to be used by variable(). variable_creator is expected to be a function with the following signature: The creator is supposed to eventually call the next_creator to create a variable if it does want to create a variable and not call Variable or ResourceVariable directly. This helps make creators composable. A creator may choose to create multiple variables, return already existing variables, or simply register that a variable was created and defer to the next creators in line. Creators can also modify the keyword arguments seen by the next creators. Custom getters in the variable scope will eventually resolve down to these custom creators when they do create variables. The valid keyword arguments in kwds are: * initial_value: A , or Python object convertible to a , which is the initial value for the Variable. The initial value must have a shape specified unless is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) * trainable: If , the default, GradientTapes automatically watch uses of this Variable. * validate_shape: If , allows the variable to be initialized with a value of unknown shape. If , the default, the shape of must be known. * caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not , caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through and other conditional statements. * name: Optional name for the variable. Defaults to and gets uniquified automatically. dtype: If set, initial_value will be converted to the given type. If , either the datatype will be kept (if is a Tensor), or will decide. * constraint: A constraint function to be applied to the variable after updates by some algorithms. * synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class . By default the synchronization is set to and the current chooses when to synchronize. * aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class . This set may grow over time, so it's important the signature of creators is as mentioned above. 
Args: variable_creator: the passed creator Yields: A scope in which the creator is active", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", + "ast_data": "FunctionDef name:variable_creator_scope arg:variable_creator arguments arg With Call Call Call" + }, + { + "library": "pytorch", + "name": "_is_compiled", + "source_code": "def _is_compiled() -> bool:\n return hasattr(torch._C, '_cuda_getDeviceCount')", + "docstring": "Return true if compile with CUDA support.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:_is_compiled arguments Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "devices", + "source_code": "def devices(self) -> list[_Device]:\n return ['cpu', _DASK_DEVICE]", + "docstring": "The devices supported by Dask. For Dask, this always returns ``. Returns ------- devices : list[Device] The devices supported by Dask. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes Examples -------- >>> info = xp.__array_namespace_info__() >>> info.devices() ['cpu', DASK_DEVICE]", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_info.py", + "ast_data": "FunctionDef name:devices arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "array_values", + "source_code": "def array_values(self) -> ExtensionArray:\n return self._block.array_values", + "docstring": "The array that Series.array returns", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:array_values arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "redistribute", + "source_code": "def redistribute(self, device_mesh: Optional[DeviceMesh]=None, placements: Optional[Sequence[Placement]]=None, *, async_op: bool=False, forward_dtype: Optional[torch.dtype]=None, backward_dtype: Optional[torch.dtype]=None) -> 'DTensor':\n device_mesh = device_mesh or self.device_mesh\n if placements is None:\n raise RuntimeError('placements is needed for redistribute!')\n placements = list(placements)\n for i, placement in enumerate(placements):\n if placement.is_partial():\n raise RuntimeError('Can not redistribute to Partial, redistributing to Partial is for internal use only!')\n elif isinstance(placement, Shard) and placement.dim < 0:\n placements[i] = Shard(placement.dim + self.ndim)\n placements = tuple(placements)\n return Redistribute.apply(self, device_mesh, placements, async_op, forward_dtype, backward_dtype)", + "docstring": "`DeviceMeshPlacementDTensor` currently only supports redistributing DTensor on the same DeviceMesh, Please file an issue if you need to redistribute DTensor to different DeviceMesh.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py", + "ast_data": "FunctionDef name:redistribute arg:self arg:device_mesh arg:placements arguments arg arg arg arg arg arg Assign BoolOp If Compare Raise Call Assign Call For Call If Call Raise Call If BoolOp Call Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "prefetch", + "source_code": "def prefetch(self, buffer_size, name=None) -> 'DatasetV2':\n return prefetch_op._prefetch(self, buffer_size, name=name)", + "docstring": "Creates a that prefetches elements from this dataset. 
Most dataset input pipelines should end with a call to . This allows later elements to be prepared while the current element is being processed. This often improves latency and throughput, at the cost of using additional memory to store prefetched elements. Note: Like other methods, prefetch operates on the elements of the input dataset. It has no concept of examples vs. batches. will prefetch two elements (2 examples), while will prefetch 2 elements (2 batches, of 20 examples each). >>> dataset = tf.data.Dataset.range(3) >>> dataset = dataset.prefetch(2) >>> [a.item() for a in dataset.as_numpy_iterator()] [0, 1, 2] Args: buffer_size: A scalar , representing the maximum number of elements that will be buffered when prefetching. If the value is used, then the buffer size is dynamically tuned. name: Optional. A name for the tf.data transformation. Returns: A new with the transformation applied as described above.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:prefetch arg:self arg:buffer_size arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "tokenize_string", + "source_code": "def tokenize_string(source: str) -> Iterator[tuple[int, str]]:\n source = ''.join((create_valid_python_identifier(substring[1:-1]) if is_backtick_quoted else substring for is_backtick_quoted, substring in _split_by_backtick(source)))\n line_reader = StringIO(source).readline\n token_generator = tokenize.generate_tokens(line_reader)\n for toknum, tokval, _, _, _ in token_generator:\n yield (toknum, tokval)", + "docstring": "Tokenize a Python source code string. Parameters ---------- source : str The Python source code string. Returns ------- tok_generator : Iterator[Tuple[int, str]] An iterator yielding all tokens with only toknum and tokval (Tuple[ing, str]).", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\parsing.py", + "ast_data": "FunctionDef name:tokenize_string arg:source arguments arg Assign Call Call Call Assign Call Assign Call For" + }, + { + "library": "scrapy", + "name": "object_ref", + "source_code": "class object_ref:\n __slots__ = ()\n\n def __new__(cls, *args: Any, **kwargs: Any) -> Self:\n obj = object.__new__(cls)\n live_refs[cls][obj] = time()\n return obj", + "docstring": "Inherit from this class to a keep a record of live instances", + "type": "class", + "file_path": "scrapy\\scrapy\\utils\\trackref.py", + "ast_data": "ClassDef name:object_ref Assign FunctionDef name:__new__ arg:cls arguments arg arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "where", + "source_code": "@tf_export.tf_export('experimental.numpy.where', v1=[])\n@np_utils.np_doc_only('where')\ndef where(condition, x=None, y=None):\n condition = asarray(condition, dtype=np.bool_)\n if x is None and y is None:\n return nonzero(condition)\n elif x is not None and y is not None:\n x, y = _promote_dtype(x, y)\n return array_ops.where_v2(condition, x, y)\n raise ValueError('Both x and y must be ndarrays, or both must be None.')", + "docstring": "Raises ValueError if exactly one of x or y is not None.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py", + "ast_data": "FunctionDef name:where arg:condition arg:x arg:y arguments arg arg arg Assign Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Assign Call Return return:yes Call Raise Call Call Call" + }, + { 
+ "library": "sphinx", + "name": "DefaultSubstitutions", + "source_code": "class DefaultSubstitutions(SphinxTransform):\n default_priority = 210\n\n def apply(self, **kwargs: Any) -> None:\n to_handle = _DEFAULT_SUBSTITUTIONS - set(self.document.substitution_defs)\n for ref in self.document.findall(nodes.substitution_reference):\n if (name := ref['refname']) in to_handle:\n ref.replace_self(self._handle_default_substitution(name))\n\n def _handle_default_substitution(self, name: _DEFAULT_SUBSTITUTION_NAMES) -> nodes.Text:\n if name == 'translation progress':\n return nodes.Text(_calculate_translation_progress(self.document))\n if name == 'today':\n if (text := self.config.today):\n return nodes.Text(text)\n today_fmt = self.config.today_fmt or _('%b %d, %Y')\n return nodes.Text(format_date(today_fmt, language=self.config.language))\n return nodes.Text(getattr(self.config, name))", + "docstring": "Replace some substitutions if they aren't defined in the document.", + "type": "class", + "file_path": "sphinx\\sphinx\\transforms\\__init__.py", + "ast_data": "ClassDef name:DefaultSubstitutions Assign FunctionDef name:apply arg:self arguments arg arg Assign Call For Call If Compare Call Call FunctionDef name:_handle_default_substitution arg:self arg:name arguments arg arg If Compare Return return:yes Call Call If Compare If Return return:yes Call Assign BoolOp Call Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_run_single_worker", + "source_code": "def _run_single_worker(worker_fn, strategy, cluster_spec, task_type, task_id, session_config, rpc_layer='', worker_barrier=None, coord=None):\n session_config = copy.deepcopy(session_config)\n strategy = copy.deepcopy(strategy)\n if task_type == _TaskType.EVALUATOR:\n if strategy:\n strategy.configure(session_config)\n else:\n assert strategy\n strategy.configure(session_config, cluster_spec, task_type, task_id)\n context = _WorkerContext(strategy, cluster_spec, task_type, task_id, session_config=session_config, rpc_layer=rpc_layer, worker_barrier=worker_barrier)\n with context:\n if coord:\n with coord.stop_on_exception():\n return worker_fn(strategy)\n else:\n return worker_fn(strategy)", + "docstring": "Runs a single worker by calling under context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py", + "ast_data": "FunctionDef name:_run_single_worker arg:worker_fn arg:strategy arg:cluster_spec arg:task_type arg:task_id arg:session_config arg:rpc_layer arg:worker_barrier arg:coord arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call If Compare If Call Call Assign Call With If With Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "build_tokenizer", + "source_code": "def build_tokenizer(self):\n if self.tokenizer is not None:\n return self.tokenizer\n token_pattern = re.compile(self.token_pattern)\n if token_pattern.groups > 1:\n raise ValueError('More than 1 capturing group in token pattern. Only a single group should be captured.')\n return token_pattern.findall", + "docstring": "Return a function that splits a string into a sequence of tokens. 
Returns ------- tokenizer: callable A function to split a string into a sequence of tokens.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:build_tokenizer arg:self arguments arg If Compare Return return:yes Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_concrete_float", + "source_code": "def is_concrete_float(a: FloatLikeType) -> bool:\n assert isinstance(a, (SymFloat, float))\n if isinstance(a, float):\n return True\n if isinstance(a.node.expr, sympy.core.numbers.Float):\n return True\n return False", + "docstring": "Utility to check if underlying object in SymInt is concrete value. Also returns true if integer is passed in. Args: a (SymInt or float): Object to test if it float", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:is_concrete_float arg:a arguments arg Call If Call Return return:yes If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "to_tensor", + "source_code": "def to_tensor(self):\n return self.tensor", + "docstring": "Converts this 'WeakTensor' into a 'tf.Tensor'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py", + "ast_data": "FunctionDef name:to_tensor arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "validate_file", + "source_code": "def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n hasher = _resolve_hasher(algorithm, file_hash)\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False", + "docstring": "Validates a file against a sha256 or md5 hash. Args: fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. 
Returns: Whether the file is valid", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", + "ast_data": "FunctionDef name:validate_file arg:fpath arg:file_hash arg:algorithm arg:chunk_size arguments arg arg arg arg Assign Call If Compare Call Call Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "get_process_group", + "source_code": "@staticmethod\ndef get_process_group(func, args) -> ProcessGroup:\n if func in CollectiveOp.PG_ARG_1:\n return ProcessGroup.unbox(args[1])\n if func in CollectiveOp.PG_ARG_2:\n return ProcessGroup.unbox(args[2])\n if func in CollectiveOp.PG_ARG_3:\n return _resolve_process_group(args[2])\n if func in CollectiveOp.PG_ARG_4:\n return _resolve_process_group(args[3])\n raise TypeError(f'Func {func} not found in {collective_ops}')", + "docstring": "Retrieve the process group for collective operations, except .", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\fake_collectives.py", + "ast_data": "FunctionDef name:get_process_group arg:func arg:args arguments arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "__getitem__", + "source_code": "def __getitem__(self, key: int) -> Any:\n if hasattr(self, FSDP_WRAPPED_MODULE):\n return self._fsdp_wrapped_module.__getitem__(key)\n return super().__getitem__(key)", + "docstring": "Forward indexing calls in case the module is an ``.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, hooks=None, scaffold=None, master='', config=None, checkpoint_dir=None, stop_grace_period_secs=120, checkpoint_filename_with_path=None):\n session_creator = ChiefSessionCreator(scaffold=scaffold, master=master, config=config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path)\n super(SingularMonitoredSession, self).__init__(session_creator, hooks, should_recover=False, stop_grace_period_secs=stop_grace_period_secs)", + "docstring": "Creates a SingularMonitoredSession. Args: hooks: An iterable of ScaffoldStringConfigProtoclose()` has been called. checkpoint_filename_with_path: A string. Optional path to a checkpoint file from which to restore variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:hooks arg:scaffold arg:master arg:config arg:checkpoint_dir arg:stop_grace_period_secs arg:checkpoint_filename_with_path arguments arg arg arg arg arg arg arg arg Assign Call Call Call" + }, + { + "library": "scipy", + "name": "update", + "source_code": "def update(self, delta_x, delta_grad):\n raise NotImplementedError('The method ``update(delta_x, delta_grad)`` is not implemented.')", + "docstring": "Update internal matrix. Update Hessian matrix or its inverse (depending on how 'approx_type' is defined) using information about the last evaluated points. 
Parameters ---------- delta_x : ndarray The difference between two points the gradient function have been evaluated at: ``.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py", + "ast_data": "FunctionDef name:update arg:self arg:delta_x arg:delta_grad arguments arg arg arg Raise Call" + }, + { + "library": "cryptography", + "name": "get_public", + "source_code": "def get_public(self, data: memoryview) -> tuple[tuple[memoryview], memoryview]:\n point, data = _get_sshstr(data)\n return ((point,), data)", + "docstring": "Ed25519 public fields", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py", + "ast_data": "FunctionDef name:get_public arg:self arg:data arguments arg arg Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_NotAnArray", + "source_code": "class _NotAnArray:\n\n def __init__(self, data):\n self.data = np.asarray(data)\n\n def __array__(self, dtype=None, copy=None):\n return self.data\n\n def __array_function__(self, func, types, args, kwargs):\n if func.__name__ == 'may_share_memory':\n return True\n raise TypeError(\"Don't want to call array_function {}!\".format(func.__name__))", + "docstring": "An object that is convertible to an array. Parameters ---------- data : array-like The data.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py", + "ast_data": "ClassDef name:_NotAnArray FunctionDef name:__init__ arg:self arg:data arguments arg arg Assign Call FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg Return return:yes FunctionDef name:__array_function__ arg:self arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg If Compare Return return:yes Raise Call Call" + }, + { + "library": "tensorflow", + "name": "generate", + "source_code": "@tf_export('__internal__.distribute.combinations.generate', v1=[])\ndef generate(combinations, test_combinations=()):\n default_combinations = (framework_combinations.EagerGraphCombination(), framework_combinations.TFVersionCombination(), ClusterCombination(), DistributionCombination(), GPUCombination(), TPUCombination())\n combination_decorator = combinations_lib.generate(combinations, test_combinations=default_combinations + test_combinations)\n\n def decorator(test_method_or_class):\n if isinstance(test_method_or_class, type):\n class_object = test_method_or_class\n for name, test_method in six.iteritems(class_object.__dict__.copy()):\n if name.startswith(unittest.TestLoader.testMethodPrefix) and isinstance(test_method, types.FunctionType):\n setattr(class_object, name, _multi_worker_test(test_method))\n return combination_decorator(class_object)\n else:\n return combination_decorator(_multi_worker_test(test_method_or_class))\n return decorator", + "docstring": "Distributed adapter of . All tests with distributed strategy should use this one instead of . This function has support of strategy combinations, GPU/TPU and multi worker support. 
See for usage.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py", + "ast_data": "FunctionDef name:generate arg:combinations arg:test_combinations arguments arg arg Assign Call Call Call Call Call Call Assign Call FunctionDef name:decorator arg:test_method_or_class arguments arg If Call Assign For Call Call If BoolOp Call Call Call Call Return return:yes Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "derivative", + "source_code": "def derivative(self, x, der=1):\n x, x_shape = self._prepare_x(x)\n y = self._evaluate_derivatives(x, der + 1, all_lower=False)\n return self._finish_y(y, x_shape)", + "docstring": "Evaluate a single derivative of the polynomial at the point x. Parameters ---------- x : array_like Point or points at which to evaluate the derivatives der : integer, optional Which derivative to evaluate (default: first derivative). This number includes the function value as 0th derivative. Returns ------- d : ndarray Derivative interpolated at the x-points. Shape of is determined by replacing the interpolation axis in the original array with the shape of .", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_polyint.py", + "ast_data": "FunctionDef name:derivative arg:self arg:x arg:der arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "parse_row", + "source_code": "def parse_row(self, response: Response, row: dict[str, str]) -> Any:\n raise NotImplementedError", + "docstring": "This method must be overridden with your custom spider functionality", + "type": "method", + "file_path": "scrapy\\scrapy\\spiders\\feed.py", + "ast_data": "FunctionDef name:parse_row arg:self arg:response arg:row arguments arg arg arg Raise" + }, + { + "library": "kornia", + "name": "denormalize_pixel_coordinates", + "source_code": "def denormalize_pixel_coordinates(pixel_coordinates: Tensor, height: int, width: int, eps: float=1e-08) -> Tensor:\n if pixel_coordinates.shape[-1] != 2:\n raise ValueError(f'Input pixel_coordinates must be of shape (*, 2). Got {pixel_coordinates.shape}')\n hw: Tensor = stack([tensor(width), tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)\n factor: Tensor = tensor(2.0) / (hw - 1).clamp(eps)\n return tensor(1.0) / factor * (pixel_coordinates + 1)", + "docstring": "Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates: the normalized grid coordinates. Shape can be :math:. width: the maximum width in the x-axis. height: the maximum height in the y-axis. eps: safe division by zero. Return: the denormalized pixel coordinates with shape :math:. Examples: >>> coords = tensor([[-1., -1.]]) >>> denormalize_pixel_coordinates(coords, 100, 50) tensor([[0., 0.]])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:denormalize_pixel_coordinates arg:pixel_coordinates arg:height arg:width arg:eps arguments arg arg arg arg If Compare Raise Call Call Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "to_tf", + "source_code": "def to_tf(self, **kwargs):\n return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D, **kwargs), **self._dt_dict)", + "docstring": "Convert system representation to . 
Parameters ---------- kwargs : dict, optional Additional keywords passed to Returns ------- sys : instance of Transfer function of the current system", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:to_tf arg:self arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_count_initializer_size", + "source_code": "def _count_initializer_size(graph: ir.Graph) -> int:\n return sum((v.const_value.nbytes for v in graph.initializers.values() if v.const_value is not None))", + "docstring": "Count the total size of the initializers in bytes.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py", + "ast_data": "FunctionDef name:_count_initializer_size arg:graph arguments arg Return return:yes Call Call Compare" + }, + { + "library": "django", + "name": "add_distinct_fields", + "source_code": "def add_distinct_fields(self, *field_names):\n self.distinct_fields = field_names\n self.distinct = True", + "docstring": "Add and resolve the given fields to the query's \"distinct on\" clause.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:add_distinct_fields arg:self arguments arg arg Assign Assign" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):\n self.spatial_index = spatial_index\n self.srid = srid\n kwargs['verbose_name'] = verbose_name\n super().__init__(**kwargs)", + "docstring": "The initialization function for base spatial fields. Takes the following as keyword arguments: srid: The spatial reference system identifier, an OGC standard. Defaults to 4326 (WGS84). spatial_index: Indicates whether to create a spatial index. Defaults to True. Set this instead of 'db_index' for geographic fields since index creation is different for geometry columns.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:verbose_name arg:srid arg:spatial_index arguments arg arg arg arg arg Assign Assign Assign Call Call" + }, + { + "library": "scipy", + "name": "maximum_filter1d", + "source_code": "@_ni_docstrings.docfiller\ndef maximum_filter1d(input, size, axis=-1, output=None, mode='reflect', cval=0.0, origin=0):\n input = np.asarray(input)\n if np.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n axis = normalize_axis_index(axis, input.ndim)\n if size < 1:\n raise RuntimeError('incorrect filter size')\n output = _ni_support._get_output(output, input)\n if size // 2 + origin < 0 or size // 2 + origin >= size:\n raise ValueError('invalid origin')\n mode = _ni_support._extend_mode_to_code(mode)\n _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 0)\n return output", + "docstring": "Calculate a 1-D maximum filter along the given axis. The lines of the array along the given axis are filtered with a maximum filter of given size. Parameters ---------- %(input)s size : int Length along which to calculate the 1-D maximum. %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- maximum1d : ndarray, None Maximum-filtered array with same shape as input. None if is not None Notes ----- This function implements the MAXLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, being the length, regardless of filter size. 
References ---------- .. [1] .. [2] Examples -------- >>> from scipy.ndimage import maximum_filter1d >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([8, 8, 8, 4, 9, 9, 9, 9])", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_filters.py", + "ast_data": "FunctionDef name:maximum_filter1d arg:input arg:size arg:axis arg:output arg:mode arg:cval arg:origin arguments arg arg arg arg arg arg arg Assign Call If Call Raise Call Assign Call If Compare Raise Call Assign Call If BoolOp Compare Compare Raise Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_try_parse_port", + "source_code": "def _try_parse_port(port_str: str) -> Optional[int]:\n if port_str and re.match('^[0-9]{1,5}$', port_str):\n return int(port_str)\n return None", + "docstring": "Try to extract the port number from ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\utils.py", + "ast_data": "FunctionDef name:_try_parse_port arg:port_str arguments arg If BoolOp Call Return return:yes Call Return return:no" + }, + { + "library": "authlib", + "name": "decrypt", + "source_code": "def decrypt(self, ciphertext, aad, iv, tag, key):\n raise NotImplementedError", + "docstring": "Decrypt the given cipher text. :param ciphertext: ciphertext in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param tag: authentication tag in bytes :param key: encrypted key in bytes :return: message", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7516\\models.py", + "ast_data": "FunctionDef name:decrypt arg:self arg:ciphertext arg:aad arg:iv arg:tag arg:key arguments arg arg arg arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "scale_weight_node", + "source_code": "def scale_weight_node(node: Node, modules: dict[str, nn.Module], equalization_scale: torch.Tensor, next_equalization_scale: Optional[torch.Tensor]) -> None:\n if equalization_scale is None:\n return\n if fused_module_supports_equalization(modules[str(node.target)]):\n op_module = modules[str(node.target)][0]\n else:\n op_module = modules[str(node.target)]\n assert nn_module_supports_equalization(op_module) or custom_module_supports_equalization(op_module)\n weight = op_module.weight\n assert isinstance(weight, torch.Tensor)\n equalization_scale_reshaped = reshape_scale(equalization_scale, 1, weight)\n scaled_weight = torch.mul(weight, torch.reciprocal(equalization_scale_reshaped))\n if next_equalization_scale is None:\n op_module.weight = nn.Parameter(scaled_weight)\n return\n next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, weight)\n scaled_weight = torch.mul(scaled_weight, next_equalization_scale_reshaped)\n op_module.weight = nn.Parameter(scaled_weight)\n bias = op_module.bias\n if bias is None:\n return\n assert isinstance(bias, torch.Tensor)\n next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, bias)\n scaled_bias = torch.mul(bias, next_equalization_scale_reshaped)\n op_module.bias = nn.Parameter(scaled_bias)", + "docstring": "Scale the weights for input-weight equalization by multiplying the weight by 1/equalization_scale and next_equalization_scale Args: node: Current node whose weights we want to scale equalization_scale: Current node's calculated equalization scale next_equalization_scale: Next node's calculated equalization scale if the following node needs to be equalized, 1 otherwise", + "type": "function", + "file_path": 
"pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:scale_weight_node arg:node arg:modules arg:equalization_scale arg:next_equalization_scale arguments arg arg arg arg If Compare Return return:no If Call Call Assign Call Assign Call BoolOp Call Call Assign Call Assign Call Assign Call Call If Compare Assign Call Return return:no Assign Call Assign Call Assign Call Assign If Compare Return return:no Call Assign Call Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "_vector_polynomial_value", + "source_code": "def _vector_polynomial_value(poly, x, zero_power=None):\n\n def transition(curr_poly_val, x, poly_coeff):\n res = torch.addcmul(poly_coeff.unsqueeze(-1), x, curr_poly_val)\n return res\n if zero_power is None:\n zero_power = x.new_ones(1).expand(x.shape)\n return _polynomial_value(poly, x, zero_power, transition)", + "docstring": "Evaluates for the (batched) vector input . Check out function for more details.", + "type": "function", + "file_path": "pytorch\\torch\\_lobpcg.py", + "ast_data": "FunctionDef name:_vector_polynomial_value arg:poly arg:x arg:zero_power arguments arg arg arg FunctionDef name:transition arg:curr_poly_val arg:x arg:poly_coeff arguments arg arg arg Assign Call Call Return return:yes If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__get_cmp_key", + "source_code": "def __get_cmp_key(self):\n if not hasattr(self, _CACHED_CMP_KEY):\n setattr(self, _CACHED_CMP_KEY, (type(self), self.__make_cmp_key(self._serialize())))\n return getattr(self, _CACHED_CMP_KEY)", + "docstring": "Returns a hashable eq-comparable key for .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:__get_cmp_key arg:self arguments arg If Call Call Call Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "mutate_state", + "source_code": "def mutate_state(self, project_state, preserve=True):\n new_state = project_state\n if preserve:\n new_state = project_state.clone()\n for operation in self.operations:\n operation.state_forwards(self.app_label, new_state)\n return new_state", + "docstring": "Take a ProjectState and return a new one with the migration's operations applied to it. Preserve the original object state by default and return a mutated state from a copy.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\migration.py", + "ast_data": "FunctionDef name:mutate_state arg:self arg:project_state arg:preserve arguments arg arg arg Assign If Assign Call For Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ExpintGrad", + "source_code": "@ops.RegisterGradient('Expint')\ndef _ExpintGrad(op: ops.Operation, grad):\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n return grad * math_ops.exp(x) / x", + "docstring": "Compute gradient of expint(x) with respect to its argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_ExpintGrad arg:op arg:grad arguments arg arg Assign With Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "shape_as", + "source_code": "def shape_as(self, obj):\n if self._zerod:\n return None\n return (obj * self._arr.ndim)(*self._arr.shape)", + "docstring": "Return the shape tuple as an array of some other c-types type. 
For example: ``.", + "type": "method", + "file_path": "numpy\\numpy\\_core\\_internal.py", + "ast_data": "FunctionDef name:shape_as arg:self arg:obj arguments arg arg If Return return:no Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "StagingError", + "source_code": "class StagingError(AutoGraphError):\n pass", + "docstring": "Raised during the staging (i.e. Python execution) of converted code.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py", + "ast_data": "ClassDef name:StagingError" + }, + { + "library": "django", + "name": "_check_if_value_fixed", + "source_code": "def _check_if_value_fixed(self, value, now=None):\n if now is None:\n now = _get_naive_now()\n offset = datetime.timedelta(seconds=10)\n lower = now - offset\n upper = now + offset\n if isinstance(value, datetime.datetime):\n value = _to_naive(value)\n else:\n assert isinstance(value, datetime.date)\n lower = lower.date()\n upper = upper.date()\n if lower <= value <= upper:\n return [checks.Warning('Fixed default value provided.', hint='It seems you set a fixed date / time / datetime value as default for this field. This may not be what you want. If you want to have the current date as default, use `django.utils.timezone.now`', obj=self, id='fields.W161')]\n return []", + "docstring": "Check if the given value appears to have been provided as a \"fixed\" time value, and include a warning in the returned list if it does. The value argument must be a date object or aware/naive datetime object. If now is provided, it must be a naive datetime object.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:_check_if_value_fixed arg:self arg:value arg:now arguments arg arg arg If Compare Assign Call Assign Call Assign Assign If Call Assign Call Call Assign Call Assign Call If Compare Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "indexed_case", + "source_code": "def indexed_case(branch_index, branch_fns, name='indexed_case', lower_using_switch_merge=None):\n if isinstance(branch_index, int):\n raise TypeError('branch_index must not be a Python int', branch_index)\n with ops.name_scope(name) as scope:\n branch_names = [util.unique_fn_name(scope, 'branch{}'.format(b)) for b in range(len(branch_fns))]\n add_control_dependencies = ops.get_default_graph()._add_control_dependencies\n branch_index = ops.convert_to_tensor(branch_index, name='branch_index')\n branch_graphs = []\n for branch_name, branch_fn in zip(branch_names, branch_fns):\n branch_graphs.append(func_graph_module.func_graph_from_py_func(branch_name, branch_fn, [], {}, func_graph=util.CondBranchFuncGraph(branch_name, collections=ops.get_default_graph()._collections), add_control_dependencies=add_control_dependencies, op_return_value=branch_index))\n verify_captures(_CASE, branch_graphs)\n return _build_case(branch_index, branch_graphs, [g.external_captures for g in branch_graphs], name=scope, lower_using_switch_merge=lower_using_switch_merge)", + "docstring": "Like conv_v2, except emits a Case op instead of an If.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", + "ast_data": "FunctionDef name:indexed_case arg:branch_index arg:branch_fns arg:name arg:lower_using_switch_merge arguments arg arg arg arg If Call Raise Call With Call Assign Call Call Call Call Assign Call Assign Call Assign For Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": 
"tensorflow", + "name": "_has_nchw_support", + "source_code": "def _has_nchw_support():\n explicitly_on_cpu = _is_current_explicit_device('CPU')\n gpus_available = bool(_get_available_gpus())\n return not explicitly_on_cpu and gpus_available", + "docstring": "Check whether the current scope supports NCHW ops. TensorFlow does not support NCHW on CPU. Therefore we check if we are not explicitly put on CPU, and have GPUs available. In this case there will be soft-placing on the GPU device. Returns: bool: if the current scope device placement would support nchw", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_has_nchw_support arguments Assign Call Assign Call Call Return return:yes BoolOp" + }, + { + "library": "matplotlib", + "name": "onmove", + "source_code": "def onmove(self, event):\n if self.ignore(event):\n return\n if not self.canvas.widgetlock.available(self):\n return\n if not self.ax.contains(event)[0]:\n self.linev.set_visible(False)\n self.lineh.set_visible(False)\n if self.needclear:\n self.canvas.draw()\n self.needclear = False\n return\n self.needclear = True\n xdata, ydata = self._get_data_coords(event)\n self.linev.set_xdata((xdata, xdata))\n self.linev.set_visible(self.visible and self.vertOn)\n self.lineh.set_ydata((ydata, ydata))\n self.lineh.set_visible(self.visible and self.horizOn)\n if not (self.visible and (self.vertOn or self.horizOn)):\n return\n if self.useblit:\n if self.background is not None:\n self.canvas.restore_region(self.background)\n self.ax.draw_artist(self.linev)\n self.ax.draw_artist(self.lineh)\n self.canvas.blit(self.ax.bbox)\n else:\n self.canvas.draw_idle()", + "docstring": "Internal event handler to draw the cursor when the mouse moves.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:onmove arg:self arg:event arguments arg arg If Call Return return:no If Call Return return:no If Call Call Call If Call Assign Return return:no Assign Assign Call Call Call BoolOp Call Call BoolOp If BoolOp BoolOp Return return:no If If Compare Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "conditional_reset", + "source_code": "@functools.wraps(reset_func)\ndef conditional_reset(*args, **kwargs):\n datapipe = args[0]\n if datapipe._snapshot_state in (_SnapshotState.Iterating, _SnapshotState.NotStarted):\n datapipe._number_of_samples_yielded = 0\n datapipe._fast_forward_iterator = None\n reset_func(*args, **kwargs)\n datapipe._snapshot_state = _SnapshotState.Iterating", + "docstring": "Only execute DataPipe's method if is or . This allows recently restored DataPipe to preserve its restored state during the initial call.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_typing.py", + "ast_data": "FunctionDef name:conditional_reset arguments arg arg Assign If Compare Assign Assign Call Assign Call" + }, + { + "library": "numpy", + "name": "get_flags", + "source_code": "def get_flags(self):\n return [] + self.pic_flags", + "docstring": "List of flags common to all compiler types.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py", + "ast_data": "FunctionDef name:get_flags arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_skip_coverage", + "source_code": "@staticmethod\ndef _skip_coverage(path: str) -> bool:\n return '/third-party/' in path", + "docstring": "Returns True if file path should not be processed. 
This is repo-specific and only makes sense for the current state of ovrsource.", + "type": "method", + "file_path": "pytorch\\tools\\code_coverage\\package\\tool\\parser\\llvm_coverage_parser.py", + "ast_data": "FunctionDef name:_skip_coverage arg:path arguments arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "try_", + "source_code": "@staticmethod\ndef try_(method_fn, *args, **kwargs):\n if not chromium_event_log_active():\n return\n metrics_context = get_metrics_context()\n if not metrics_context.in_progress():\n return\n method_fn(*args, **kwargs)", + "docstring": "Special function that quietly runs a given method, returning if CHROMIUM_EVENT_LOG is None or metrics context is not set", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:try_ arg:method_fn arguments arg arg arg If Call Return return:no Assign Call If Call Return return:no Call" + }, + { + "library": "tensorflow", + "name": "legacy_saveable_name", + "source_code": "def legacy_saveable_name(name):\n\n def decorator(cls_or_obj):\n setattr(cls_or_obj, _LEGACY_SAVEABLE_NAME, name)\n return cls_or_obj\n return decorator", + "docstring": "Decorator to set the local name to use in the Checkpoint. Needed for migrating certain Trackables (see next paragraph) from the legacy to the new function. This decorator should be used if the SaveableObject generates tensors with different names from the name that is passed to the factory. Example migration: *Before* *After* Args: name: String name of the SaveableObject factory (the key returned in the function) Returns: A decorator.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\saveable_compat.py", + "ast_data": "FunctionDef name:legacy_saveable_name arg:name arguments arg FunctionDef name:decorator arg:cls_or_obj arguments arg Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "write_csv_from_dict", + "source_code": "def write_csv_from_dict(filename, input_dict):\n f = open(PATH_TO_DIR + '/data/' + filename, 'w')\n for k, v in input_dict.items():\n line = k\n for item in v:\n line += ',' + item\n f.write(line + '\\n')\n f.flush()\n print('Wrote to file %s' % filename)\n check_with_golden(filename)", + "docstring": "Writes out a file from an input dictionary. After writing out the file, it checks the new list against the golden to make sure golden file is up-to-date. Args: filename: String that is the output file name. input_dict: Dictionary that is to be written out to a file.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py", + "ast_data": "FunctionDef name:write_csv_from_dict arg:filename arg:input_dict arguments arg arg Assign Call For Call Assign For Call Call Call Call" + }, + { + "library": "pandas", + "name": "make_block", + "source_code": "def make_block(values, placement, klass=None, ndim=None, dtype: Dtype | None=None) -> Block:\n warnings.warn('make_block is deprecated and will be removed in a future version. 
Use pd.api.internals.create_dataframe_from_blocks or (recommended) higher-level public APIs instead.', DeprecationWarning, stacklevel=2)\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n values, dtype = extract_pandas_array(values, dtype, ndim)\n from pandas.core.internals.blocks import ExtensionBlock\n if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype):\n klass = None\n if klass is None:\n dtype = dtype or values.dtype\n klass = get_block_type(dtype)\n if not isinstance(placement, BlockPlacement):\n placement = BlockPlacement(placement)\n ndim = maybe_infer_ndim(values, placement, ndim)\n if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)):\n values = extract_array(values, extract_numpy=True)\n values = ensure_block_shape(values, ndim)\n check_ndim(values, placement, ndim)\n values = maybe_coerce_values(values)\n return klass(values, ndim=ndim, placement=placement)", + "docstring": "This is a pseudo-public analogue to blocks.new_block. We ask that downstream libraries use this rather than any fully-internal APIs, including but not limited to: - core.internals.blocks.make_block - Block.make_block - Block.make_block_same_class - Block.__init__", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\api.py", + "ast_data": "FunctionDef name:make_block arg:values arg:placement arg:klass arg:ndim arg:dtype arguments arg arg arg arg arg Call If Compare Assign Call Assign Call If BoolOp Compare Call Assign If Compare Assign BoolOp Assign Call If Call Assign Call Assign Call If Call Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "cos", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef cos(x):\n return math_ops.cos(x)", + "docstring": "Computes cos of x element-wise. Args: x: Tensor or variable. Returns: A tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:cos arg:x arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "require", + "source_code": "@finalize_array_function_like\n@set_module('numpy')\ndef require(a, dtype=None, requirements=None, *, like=None):\n if like is not None:\n return _require_with_like(like, a, dtype=dtype, requirements=requirements)\n if not requirements:\n return asanyarray(a, dtype=dtype)\n requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}\n if 'E' in requirements:\n requirements.remove('E')\n subok = False\n else:\n subok = True\n order = 'A'\n if requirements >= {'C', 'F'}:\n raise ValueError('Cannot specify both \"C\" and \"F\" order')\n elif 'F' in requirements:\n order = 'F'\n requirements.remove('F')\n elif 'C' in requirements:\n order = 'C'\n requirements.remove('C')\n arr = array(a, dtype=dtype, order=order, copy=None, subok=subok)\n for prop in requirements:\n if not arr.flags[prop]:\n return arr.copy(order)\n return arr", + "docstring": "Return an ndarray of the provided type that satisfies requirements. This function is useful to be sure that an array with the correct flags is returned for passing to compiled code (perhaps through ctypes). Parameters ---------- a : array_like The object to be converted to a type-and-requirement-satisfying array. dtype : data-type The required data-type. If None preserve the current dtype. If your application requires the data to be in native byteorder, include a byteorder specification as a part of the dtype specification. 
requirements : str or sequence of str The requirements list can be any of the following * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array * 'ALIGNED' ('A') - ensure a data-type aligned array * 'WRITEABLE' ('W') - ensure a writable array * 'OWNDATA' ('O') - ensure an array that owns its own data * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 Returns ------- out : ndarray Array with specified requirements and type if given. See Also -------- asarray : Convert input to an ndarray. asanyarray : Convert to an ndarray, but pass through ndarray subclasses. ascontiguousarray : Convert input to a contiguous array. asfortranarray : Convert input to an ndarray with column-major memory order. ndarray.flags : Information about the memory layout of the array. Notes ----- The returned array will be guaranteed to have the listed requirements by making a copy if needed. Examples -------- >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x.flags C_CONTIGUOUS : True F_CONTIGUOUS : False OWNDATA : False WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) >>> y.flags C_CONTIGUOUS : False F_CONTIGUOUS : True OWNDATA : True WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False", + "type": "function", + "file_path": "numpy\\numpy\\_core\\_asarray.py", + "ast_data": "FunctionDef name:require arg:a arg:dtype arg:requirements arguments arg arg arg arg If Compare Return return:yes Call If Return return:yes Call Assign Call If Compare Call Assign Assign Assign If Compare Raise Call If Compare Assign Call If Compare Assign Call Assign Call For If Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "RelaxedOneHotCategorical", + "source_code": "class RelaxedOneHotCategorical(TransformedDistribution):\n arg_constraints = {'probs': constraints.simplex, 'logits': constraints.real_vector}\n support = constraints.simplex\n has_rsample = True\n base_dist: ExpRelaxedCategorical\n\n def __init__(self, temperature: Tensor, probs: Optional[Tensor]=None, logits: Optional[Tensor]=None, validate_args: Optional[bool]=None) -> None:\n base_dist = ExpRelaxedCategorical(temperature, probs, logits, validate_args=validate_args)\n super().__init__(base_dist, ExpTransform(), validate_args=validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(RelaxedOneHotCategorical, _instance)\n return super().expand(batch_shape, _instance=new)\n\n @property\n def temperature(self) -> Tensor:\n return self.base_dist.temperature\n\n @property\n def logits(self) -> Tensor:\n return self.base_dist.logits\n\n @property\n def probs(self) -> Tensor:\n return self.base_dist.probs", + "docstring": "Creates a RelaxedOneHotCategorical distribution parametrized by :attr:, and either :attr: or :attr:. This is a relaxed version of the :class: distribution, so its samples are on simplex, and are reparametrizable. Example:: >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = RelaxedOneHotCategorical(torch.tensor([2.2]), ... 
torch.tensor([0.1, 0.2, 0.3, 0.4])) >>> m.sample() tensor([ 0.1294, 0.2324, 0.3859, 0.2523]) Args: temperature (Tensor): relaxation temperature probs (Tensor): event probabilities logits (Tensor): unnormalized log probability for each event", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\relaxed_categorical.py", + "ast_data": "ClassDef name:RelaxedOneHotCategorical Assign Assign Assign FunctionDef name:__init__ arg:self arg:temperature arg:probs arg:logits arg:validate_args arguments arg arg arg arg arg Assign Call Call Call Call FunctionDef name:expand arg:self arg:batch_shape arg:_instance arguments arg arg arg Assign Call Return return:yes Call Call FunctionDef name:temperature arg:self arguments arg Return return:yes FunctionDef name:logits arg:self arguments arg Return return:yes FunctionDef name:probs arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_task_info", + "source_code": "def get_task_info(self):\n return (self.task_type, self.task_id)", + "docstring": "Returns job name and task_id for the process which calls this. This returns the job name and task index for the process which calls this function according to its rank and cluster specification. The job name and task index are set after a cluster is constructed by cluster_spec otherwise defaults to None. Returns: A string specifying job name the process belongs to and an integer specifying the task index the process belongs to in that job.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py", + "ast_data": "FunctionDef name:get_task_info arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "add_figure", + "source_code": "def add_figure(self, tag: str, figure: Union['Figure', list['Figure']], global_step: Optional[int]=None, close: bool=True, walltime: Optional[float]=None) -> None:\n torch._C._log_api_usage_once('tensorboard.logging.add_figure')\n if isinstance(figure, list):\n self.add_image(tag, figure_to_image(figure, close), global_step, walltime, dataformats='NCHW')\n else:\n self.add_image(tag, figure_to_image(figure, close), global_step, walltime, dataformats='CHW')", + "docstring": "Render matplotlib figure into an image and add it to summary. Note that this requires the `` package. Args: tag: Data identifier figure: Figure or a list of figures global_step: Global step value to record close: Flag to automatically close the figure walltime: Optional override default walltime (time.time()) seconds after epoch of event", + "type": "method", + "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py", + "ast_data": "FunctionDef name:add_figure arg:self arg:tag arg:figure arg:global_step arg:close arg:walltime arguments arg arg arg arg arg arg Call If Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "adjoint", + "source_code": "def adjoint(self, name: str='adjoint') -> 'LinearOperator':\n if self.is_self_adjoint is True:\n return self\n with self._name_scope(name):\n return self._linop_adjoint()", + "docstring": "Returns the adjoint of the current . Given representing this , return . Note that calling and are equivalent. Args: name: A name for this . 
Returns: which represents the adjoint of this .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:adjoint arg:self arg:name arguments arg arg If Compare Return return:yes With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_log_write_dir", + "source_code": "def _get_log_write_dir(self):\n return distributed_file_utils.write_dirpath(self.log_dir, self.model.distribute_strategy)", + "docstring": "For multi-worker, only chief should write, others write to '/tmp'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:_get_log_write_dir arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_compute_cosine_distance", + "source_code": "@classmethod\ndef _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):\n output = []\n if not inputs_normalized:\n with ops.colocate_with(clusters, ignore_existing=True):\n clusters = nn_impl.l2_normalize(clusters, axis=1)\n for inp in inputs:\n with ops.colocate_with(inp, ignore_existing=True):\n if not inputs_normalized:\n inp = nn_impl.l2_normalize(inp, axis=1)\n output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))\n return output", + "docstring": "Computes cosine distance between each input and each cluster center. Args: inputs: list of input Tensor. clusters: cluster Tensor inputs_normalized: if True, it assumes that inp and clusters are normalized and computes the dot product which is equivalent to the cosine distance. Else it L2 normalizes the inputs first. Returns: list of Tensors, where each element corresponds to each element in inp. The value is the distance of each row to all the cluster centers.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py", + "ast_data": "FunctionDef name:_compute_cosine_distance arg:cls arg:inputs arg:clusters arg:inputs_normalized arguments arg arg arg arg Assign If With Call Assign Call For With Call If Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_build_meta_graph", + "source_code": "def _build_meta_graph(obj, signatures, options: save_options.SaveOptions, meta_graph_def: meta_graph_pb2.MetaGraphDef=None):\n with save_context.save_context(options):\n return _build_meta_graph_impl(obj, signatures, options, meta_graph_def)", + "docstring": "Creates a MetaGraph under a save context. Args: obj: A trackable object to build the MetaGraph from. signatures: Can be a with an input signature specified or the result of on a -decorated function . may also be a dictionary, in which case it maps from signature keys to instances. If None, finds signature to export from the -decorated methods in . options: object that specifies options for saving. meta_graph_def: Optional, the MetaGraphDef proto fill. Raises: AssertionError: If is executing inside a . ValueError: If is not trackable. Returns: meta_graph_def: Filled MetaGraphDef proto exported_graph: object generated from . object_saver: of the and its dependencies. asset_info: tuple containing external assets in the . saveable_view.nodes: _SaveableView nodes. 
saveable_view.node_paths: _SaveableView paths.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:_build_meta_graph arg:obj arg:signatures arg:options arg:meta_graph_def arguments arg arg arg arg With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_best_effort_input_batch_size", + "source_code": "def _best_effort_input_batch_size(flat_input):\n for input_ in flat_input:\n shape = input_.shape\n if shape.rank is None:\n continue\n if shape.rank < 2:\n raise ValueError(f'Input tensor should have rank >= 2. Received input={input_} of rank {shape.rank}')\n batch_size = shape.dims[1].value\n if batch_size is not None:\n return batch_size\n return array_ops.shape(flat_input[0])[1]", + "docstring": "Get static input batch size if available, with fallback to the dynamic one. Args: flat_input: An iterable of time major input Tensors of shape . All inputs should have compatible batch sizes. Returns: The batch size in Python integer if available, or a scalar Tensor otherwise. Raises: ValueError: if there is any input with an invalid shape.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py", + "ast_data": "FunctionDef name:_best_effort_input_batch_size arg:flat_input arguments arg For Assign If Compare If Compare Raise Call Assign If Compare Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_check_archive_signature", + "source_code": "def _check_archive_signature(archive_file: io.BufferedIOBase) -> None:\n signature = archive_file.read(8)\n if signature != b'!\\n':\n raise RuntimeError('Invalid archive file format.')", + "docstring": "Checks if the file has the correct archive header signature. The cursor is moved to the first available file header section after successfully checking the signature. Args: archive_file: The archive file object pointing at its beginning. Raises: RuntimeError: The archive signature is invalid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\ios\\extract_object_files.py", + "ast_data": "FunctionDef name:_check_archive_signature arg:archive_file arguments arg Assign Call If Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "loop", + "source_code": "@staticmethod\ndef loop(coord, timer_interval_secs, target, args=None, kwargs=None):\n looper = LooperThread(coord, timer_interval_secs, target=target, args=args, kwargs=kwargs)\n looper.start()\n return looper", + "docstring": "Start a LooperThread that calls a function periodically. If is None the thread calls repeatedly. Otherwise is called every seconds. The thread terminates when a stop of the coordinator is requested. Args: coord: A Coordinator. timer_interval_secs: Number. Time boundaries at which to call . target: A callable object. args: Optional arguments to pass to when calling it. kwargs: Optional keyword arguments to pass to when calling it. 
Returns: The started thread.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py", + "ast_data": "FunctionDef name:loop arg:coord arg:timer_interval_secs arg:target arg:args arg:kwargs arguments arg arg arg arg arg Assign Call Call Return return:yes" + }, + { + "library": "scrapy", + "name": "disconnect", + "source_code": "def disconnect(self, receiver: Any, signal: Any, **kwargs: Any) -> None:\n kwargs.setdefault('sender', self.sender)\n dispatcher.disconnect(receiver, signal, **kwargs)", + "docstring": "Disconnect a receiver function from a signal. This has the opposite effect of the :meth: method, and the arguments are the same.", + "type": "method", + "file_path": "scrapy\\scrapy\\signalmanager.py", + "ast_data": "FunctionDef name:disconnect arg:self arg:receiver arg:signal arguments arg arg arg arg Call Call" + }, + { + "library": "scipy", + "name": "is_pydata_spmatrix", + "source_code": "def is_pydata_spmatrix(m) -> bool:\n base_cls = getattr(sys.modules.get('sparse'), 'SparseArray', None)\n return base_cls is not None and isinstance(m, base_cls)", + "docstring": "Check whether object is pydata/sparse matrix, avoiding importing the module.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_sputils.py", + "ast_data": "FunctionDef name:is_pydata_spmatrix arg:m arguments arg Assign Call Call Return return:yes BoolOp Compare Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, sess, grpc_debug_server_addresses, thread_name_filter=None, send_traceback_and_source_code=True):\n\n def _gated_grpc_watch_fn(fetches, feeds):\n del fetches, feeds\n return framework.WatchOptions(debug_ops=['DebugIdentity(gated_grpc=true)'])\n super().__init__(sess, grpc_debug_server_addresses, watch_fn=_gated_grpc_watch_fn, thread_name_filter=thread_name_filter)\n self._send_traceback_and_source_code = send_traceback_and_source_code\n self._sent_graph_version = -1\n register_signal_handler()", + "docstring": "Constructor of TensorBoardDebugWrapperSession. Args: sess: The instance to be wrapped. grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a or a of s. E.g., \"localhost:2333\", \"grpc://localhost:2333\", [\"192.168.0.7:2333\", \"192.168.0.8:2333\"]. thread_name_filter: Optional filter for thread names. send_traceback_and_source_code: Whether traceback of graph elements and the source code are to be sent to the debug server(s).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\grpc_wrapper.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:sess arg:grpc_debug_server_addresses arg:thread_name_filter arg:send_traceback_and_source_code arguments arg arg arg arg arg FunctionDef name:_gated_grpc_watch_fn arg:fetches arg:feeds arguments arg arg Return return:yes Call Call Call Assign Assign Call" + }, + { + "library": "django", + "name": "is_active", + "source_code": "def is_active(self, request):\n return settings.DEBUG is False", + "docstring": "This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. 
This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis.", + "type": "method", + "file_path": "django\\django\\views\\debug.py", + "ast_data": "FunctionDef name:is_active arg:self arg:request arguments arg arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, on_exit: OnExitType):\n self._on_exit = on_exit\n self._metrics: dict[str, Any] = {}\n self._start_time_ns: int = 0", + "docstring": "Similar to MetricsContext, but used to gather the runtime metrics that are decoupled from compilation, where there's not a natural place to insert a context manager.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:on_exit arguments arg arg Assign" + }, + { + "library": "pytorch", + "name": "is_built", + "source_code": "def is_built() -> bool:\n return torch._C._has_mps", + "docstring": "Return whether PyTorch is built with MPS support. Note that this doesn't necessarily mean MPS is available; just that if this PyTorch binary were run a machine with working MPS drivers and devices, we would be able to use it.", + "type": "function", + "file_path": "pytorch\\torch\\backends\\mps\\__init__.py", + "ast_data": "FunctionDef name:is_built arguments Return return:yes" + }, + { + "library": "tensorflow", + "name": "_serialize_to_tensors", + "source_code": "def _serialize_to_tensors(self):\n raise NotImplementedError", + "docstring": "Gathers tensors to save to the checkpoint. You should only override and if you are defining a custom resource or variable with custom ops. Otherwise, please store the state of your trackable in objects and add them to Trackable object hierarchy using (for subclasses of ) or overriding the method. For an example of a valid implementation of these two methods, please see . **Invalid implementation** In this example, can be saved and restored from checkpoints, but is incompatible with SavedModel, which tries to convert the serialize/restore functions into tf.functions. This fails because attribute assignment () is not graph-friendly. **Suggested fix** If the attribute should be saved to the checkpoint, then convert it a . **TF1 Saver Compatibility** If your Trackable needs to be comatible with , implement . **AsyncCheckpoint Support** If your Trackable implements , needs to be implemented as well to support asynchronous checkpoint. 
Returns: A dictionary mapping names to tensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py", + "ast_data": "FunctionDef name:_serialize_to_tensors arg:self arguments arg Raise" + }, + { + "library": "numpy", + "name": "check_inline", + "source_code": "def check_inline(self):\n return check_inline(self)", + "docstring": "Return the inline keyword recognized by the compiler, empty string otherwise.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\command\\config.py", + "ast_data": "FunctionDef name:check_inline arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_record_memory_history", + "source_code": "def _record_memory_history(enabled: Literal[None, 'state', 'all']='all', *args, **kwargs) -> None:\n if isinstance(enabled, bool):\n return _record_memory_history_legacy(enabled, *args, **kwargs)\n else:\n return _record_memory_history_impl(enabled, *args, **kwargs)", + "docstring": "Enable recording of stack traces associated with memory allocations, so you can tell what allocated any piece of memory in :func:. In addition too keeping stack traces with each current allocation and free, this will also enable recording of a history of all alloc/free events. Use :func: to retrieve this information, and the tools in to visualize snapshots. The Python trace collection is fast (2us per trace), so you may consider enabling this on production jobs if you anticipate ever having to debug memory issues. C++ trace collection is also fast (~50ns/frame), which for many typical programs works out to ~2us per trace, but can vary depending on stack depth. Args: enabled (Literal[None, \"state\", \"all\"], optional): , disable recording memory history. , keep information for currenly allocated memory. , additionally keep a history of all alloc/free calls. Defaults to \"all\". context (Literal[None, \"state\", \"alloc\", \"all\"], optional): , Do not record any tracebacks. , Record tracebacks for currently allocated memory. , additionally keep tracebacks for alloc calls. , additionally keep tracebacks for free calls. Defaults to \"all\". stacks (Literal[\"python\", \"all\"], optional): , include Python, TorchScript, and inductor frames in tracebacks , additionally include C++ frames Defaults to \"all\". max_entries (int, optional): Keep a maximum of alloc/free events in the recorded history recorded.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\memory.py", + "ast_data": "FunctionDef name:_record_memory_history arg:enabled arguments arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "_maindb_connection", + "source_code": "@cached_property\ndef _maindb_connection(self):\n settings_dict = settings.DATABASES[self.connection.alias]\n user = settings_dict.get('SAVED_USER') or settings_dict['USER']\n password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD']\n settings_dict = {**settings_dict, 'USER': user, 'PASSWORD': password}\n DatabaseWrapper = type(self.connection)\n return DatabaseWrapper(settings_dict, alias=self.connection.alias)", + "docstring": "This is analogous to other backends' property, which allows access to an \"administrative\" connection which can be used to manage the test databases. 
For Oracle, the only connection that can be used for that purpose is the main (non-test) connection.", + "type": "method", + "file_path": "django\\django\\db\\backends\\oracle\\creation.py", + "ast_data": "FunctionDef name:_maindb_connection arg:self arguments arg Assign Assign BoolOp Call Assign BoolOp Call Assign Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "dimension", + "source_code": "@property\ndef dimension(self):\n return capi.get_dims(self.ptr)", + "docstring": "Return 0 for points, 1 for lines, and 2 for surfaces.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:dimension arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_data", + "source_code": "def set_data(self, positions):\n method = 'set_xdata' if self.direction == 'horizontal' else 'set_ydata'\n for line, p in zip(self.artists, positions):\n getattr(line, method)([p, p])", + "docstring": "Set x- or y-positions of handles, depending on if the lines are vertical or horizontal. Parameters ---------- positions : tuple of length 2 Set the positions of the handle in data coordinates", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:set_data arg:self arg:positions arguments arg arg Assign Compare For Call Call Call" + }, + { + "library": "pytorch", + "name": "AllOrAnyReductionTypePromotionRule", + "source_code": "class AllOrAnyReductionTypePromotionRule(ReductionTypePromotionRule):\n\n def __init__(self, op_name: str):\n super().__init__('aten', op_name, _prims_common.REDUCTION_OUTPUT_TYPE_KIND.ALWAYS_BOOL)\n\n def preview_type_promotion(self, args: tuple, kwargs: dict) -> TypePromotionSnapshot:\n assert len(args) >= 1, f'Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument'\n arg = args[0]\n assert isinstance(arg, torch.Tensor), f'type(arg)={type(arg)!r} is not torch.Tensor'\n computation_dtype = torch.bool\n result_dtype = torch.uint8 if arg.dtype == torch.uint8 else torch.bool\n return TypePromotionSnapshot({0: computation_dtype}, {}, result_dtype)", + "docstring": "Reference type promotion rule from torch.ops.aten.all or torch.ops.aten.any. This is a special case where computation dtype is always torch.bool. 
The result dtype is always uint8 if kwarg is uint8, otherwise torch.bool.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py", + "ast_data": "ClassDef name:AllOrAnyReductionTypePromotionRule FunctionDef name:__init__ arg:self arg:op_name arguments arg arg Call Call FunctionDef name:preview_type_promotion arg:self arg:args arg:kwargs arguments arg arg arg Compare Call Assign Call Call Assign Assign Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "descendants", + "source_code": "def descendants(self):\n return self._descendants_with_paths()[0]", + "docstring": "Returns a list of all nodes from self.root using a breadth first traversal.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\trackable_view.py", + "ast_data": "FunctionDef name:descendants arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "mesh_broadcast", + "source_code": "def mesh_broadcast(tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int=0, async_op: bool=False, *, group_src: int=0) -> Optional[Work]:\n if tensor.is_meta:\n return None\n dim_group = mesh.get_group(mesh_dim)\n assert isinstance(dim_group, ProcessGroup)\n return broadcast(tensor, group=dim_group, async_op=async_op, group_src=group_src)", + "docstring": "broadcast the tensor to a device mesh dimension. We by default use the first rank of the mesh dimension as the source of truth, i.e for a 2d mesh [[0, 1], [2, 3]], if we broadcast on mesh_dim = 1, we will broadcast the tensor on rank 0 to rank 0/1, and tensor on rank 2 to rank 2/3. Args: tensor (torch.Tensor): tensor to broadcast. mesh_dim (int, optional): indicate which mesh dimension we want to scatter on, we by default choose the first rank on the mesh dimension as source of truth. Keyword args: group_src (int, optional): the group rank of the source data for the logical/global tensor, on the specific mesh dimension. By default, we use `Work` object", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_collective_utils.py", + "ast_data": "FunctionDef name:mesh_broadcast arg:tensor arg:mesh arg:mesh_dim arg:async_op arguments arg arg arg arg arg If Return return:no Assign Call Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "unique", + "source_code": "def unique(list_: Iterable[_T], key: Callable[[_T], Any]=lambda x: x) -> list[_T]:\n seen = set()\n result: list[_T] = []\n for item in list_:\n seenkey = key(item)\n if seenkey in seen:\n continue\n seen.add(seenkey)\n result.append(item)\n return result", + "docstring": "efficient function to uniquify a list preserving item order", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\python.py", + "ast_data": "FunctionDef name:unique arg:list_ arg:key arguments arg arg arguments arg Assign Call For Assign Call If Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_valid_dtypes", + "source_code": "def _valid_dtypes(self):\n return _DEFAULT_VALID_DTYPES", + "docstring": "Valid types for loss, variables and gradients. Subclasses should override to allow other float types. 
Returns: Valid types for loss, variables and gradients.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:_valid_dtypes arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "PeriodicModelAverager", + "source_code": "class PeriodicModelAverager(ModelAverager):\n\n def __init__(self, period, warmup_steps=0, process_group: Optional[dist.ProcessGroup]=None):\n super().__init__(process_group)\n if warmup_steps < 0:\n raise ValueError('Arg ``warmup_steps`` must be a non-negative number.')\n self.warmup_steps = warmup_steps\n if period < 1:\n raise ValueError('Arg ``period`` must be a positive value.')\n elif period == 1:\n warnings.warn('When period is 1, no need to use model averaging because the communication cost of all-reducing parameters will be no less than the cost of all-reducing gradients by DistributedDataParallel in the backward pass. Therefore, only DistributedDataParallel should be used for this case.')\n self.period = period\n\n def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[dict[str, torch.nn.Parameter]]]):\n if self.step >= self.warmup_steps and (self.step - self.warmup_steps) % self.period == 0:\n utils.average_parameters_or_parameter_groups(params, _not_none(self.process_group))\n self.step += 1", + "docstring": "Averages parameters periodically after the warm-up stage. This can be used for running _, by running :class: (DDP) using the subgroups created by :meth:. Args: period (int): The number of steps per model averaging. Usually the period should be greater than `torch.distributed.init_process_group` period. >>> averager.average_parameters(model.parameters())", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\averagers.py", + "ast_data": "ClassDef name:PeriodicModelAverager FunctionDef name:__init__ arg:self arg:period arg:warmup_steps arg:process_group arguments arg arg arg arg Call Call If Compare Raise Call Assign If Compare Raise Call If Compare Call Assign FunctionDef name:average_parameters arg:self arg:params arguments arg arg If BoolOp Compare Compare Call Call" + }, + { + "library": "tensorflow", + "name": "after_create_session", + "source_code": "def after_create_session(self, session, coord):\n pass", + "docstring": "Called when new TensorFlow session is created. This is called to signal the hooks that a new session has been created. This has two essential differences with the situation in which is called: * When this is called, the graph is finalized and ops can no longer be added to the graph. * This method will also be called as a result of recovering a wrapped session, not only at the beginning of the overall session. Args: session: A TensorFlow Session that has been created. coord: A Coordinator object which keeps track of all threads.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py", + "ast_data": "FunctionDef name:after_create_session arg:self arg:session arg:coord arguments arg arg arg" + }, + { + "library": "kornia", + "name": "_jpeg_quality_to_scale", + "source_code": "def _jpeg_quality_to_scale(compression_strength: Tensor) -> Tensor:\n scale: Tensor = differentiable_polynomial_floor(torch.where(compression_strength < 50, 5000.0 / compression_strength, 200.0 - 2.0 * compression_strength))\n return scale", + "docstring": "Convert a given JPEG quality to the scaling factor. 
Args: compression_strength (Tensor): Compression strength ranging from 0 to 100. Any shape is supported. Returns: scale (Tensor): Scaling factor to be applied to quantization matrix. Same shape as input.", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\jpeg.py", + "ast_data": "FunctionDef name:_jpeg_quality_to_scale arg:compression_strength arguments arg Call Call Compare Return return:yes" + }, + { + "library": "django", + "name": "scale", + "source_code": "@property\ndef scale(self):\n return TransformPoint(self, 'scale')", + "docstring": "Pixel scale in units of the raster projection.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py", + "ast_data": "FunctionDef name:scale arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "finalize", + "source_code": "def finalize(self, X, y, sample_weight):\n pass", + "docstring": "Finalize the solvers results. Some solvers may need this, others not.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\_newton_solver.py", + "ast_data": "FunctionDef name:finalize arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self) -> Tensor:\n rot = self.scale * angle_to_rotation_matrix(self.rot)\n out = convert_affinematrix_to_homography(torch.cat([rot, self.shift], dim=2))\n return out", + "docstring": "Single-batch similarity transform\". Returns: Similarity with shape :math:", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py", + "ast_data": "FunctionDef name:forward arg:self arguments arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "resampled", + "source_code": "def resampled(self, lutsize):\n colors = self(np.linspace(0, 1, lutsize))\n new_cmap = ListedColormap(colors, name=self.name)\n new_cmap._rgba_over = self._rgba_over\n new_cmap._rgba_under = self._rgba_under\n new_cmap._rgba_bad = self._rgba_bad\n return new_cmap", + "docstring": "Return a new colormap with *lutsize* entries.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:resampled arg:self arg:lutsize arguments arg arg Assign Call Call Assign Call Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_save_representative_dataset", + "source_code": "def _save_representative_dataset(representative_dataset: repr_dataset.RepresentativeDatasetOrMapping, signature_def_map: _SignatureDefMap) -> Mapping[str, _RepresentativeDatasetFile]:\n if isinstance(representative_dataset, Mapping):\n if set(signature_def_map.keys()) != set(representative_dataset.keys()):\n raise ValueError(f'The signature keys and the keys of representative dataset map do not match. Signature keys: {set(signature_def_map.keys())}, representative dataset map: {set(representative_dataset.keys())}.')\n representative_dataset_map = representative_dataset\n elif len(signature_def_map.keys()) > 1:\n raise ValueError(f'Representative dataset is not a mapping (got: {type(representative_dataset)}), but there is more than one signature key provided. 
Please provide a map of {{signature_key -> dataset}} with more than one signature key.')\n else:\n representative_dataset_map = {list(signature_def_map.keys())[0]: representative_dataset}\n path_map = {}\n expected_input_key_map = {}\n for signature_key, signature_def in signature_def_map.items():\n _, path_map[signature_key] = tempfile.mkstemp(suffix='.tfrecord', prefix=signature_key)\n expected_input_key_map[signature_key] = signature_def.inputs.keys()\n return repr_dataset.TfRecordRepresentativeDatasetSaver(path_map=path_map, expected_input_key_map=expected_input_key_map).save(representative_dataset_map)", + "docstring": "Saves the representative dataset to temporary TFRecord files. Args: representative_dataset: Representative dataset used for the calibration step. Representative datasets should exist for each signature def key in . signature_def_map: Signature def key -> SignatureDef mapping. Returns: A map from signature key to the saved representative dataset file.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py", + "ast_data": "FunctionDef name:_save_representative_dataset arg:representative_dataset arg:signature_def_map arguments arg arg If Call If Compare Call Call Call Call Raise Call Call Call Call Call Assign If Compare Call Call Raise Call Call Assign Call Call Assign Assign For Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_clim", + "source_code": "def get_clim(self):\n return (self.norm.vmin, self.norm.vmax)", + "docstring": "Return the values (min, max) that are mapped to the colormap limits.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py", + "ast_data": "FunctionDef name:get_clim arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "apply_mask", + "source_code": "def apply_mask(y_p, sw, mask):\n if mask is not None:\n mask = math_ops.cast(mask, y_p.dtype)\n if sw is not None:\n mask, _, sw = losses_utils.squeeze_or_expand_dimensions(mask, sample_weight=sw)\n sw *= mask\n else:\n sw = mask\n return sw", + "docstring": "Applies any mask on predictions to sample weights.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "FunctionDef name:apply_mask arg:y_p arg:sw arg:mask arguments arg arg arg If Compare Assign Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "res_list_literal", + "source_code": "def res_list_literal(self, ns, elt_types):\n raise NotImplementedError('subclasses must implement')", + "docstring": "Resolves the type of a list literal from its elements.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py", + "ast_data": "FunctionDef name:res_list_literal arg:self arg:ns arg:elt_types arguments arg arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "set_y", + "source_code": "def set_y(self, y):\n self._y0 = y\n self.stale = True", + "docstring": "Set the bottom coordinate of the rectangle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_y arg:self arg:y arguments arg arg Assign Assign" + }, + { + "library": "django", + "name": "close", + "source_code": "def close(self, **kwargs):\n pass", + "docstring": "Close the cache connection", + "type": "method", + "file_path": 
"django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:close arg:self arguments arg arg" + }, + { + "library": "matplotlib", + "name": "list_all", + "source_code": "def list_all(self):\n self._ensure_entry_points_loaded()\n return [*self.list_builtin(), *self._backend_to_gui_framework]", + "docstring": "Return list of all known backends. These include built-in backends and those obtained at runtime either from entry points or explicit `` syntax. Entry points will be loaded if they haven't been already. Returns ------- list of str Backend names.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py", + "ast_data": "FunctionDef name:list_all arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "cryptography", + "name": "public_key", + "source_code": "@abc.abstractmethod\ndef public_key(self) -> Ed25519PublicKey:\n pass", + "docstring": "The Ed25519PublicKey derived from the private key.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py", + "ast_data": "FunctionDef name:public_key arg:self arguments arg" + }, + { + "library": "numpy", + "name": "column_stack", + "source_code": "@array_function_dispatch(_column_stack_dispatcher)\ndef column_stack(tup):\n arrays = []\n for v in tup:\n arr = asanyarray(v)\n if arr.ndim < 2:\n arr = array(arr, copy=None, subok=True, ndmin=2).T\n arrays.append(arr)\n return _nx.concatenate(arrays, 1)", + "docstring": "Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns to make a single 2-D array. 2-D arrays are stacked as-is, just like with . 1-D arrays are turned into 2-D columns first. Parameters ---------- tup : sequence of 1-D or 2-D arrays. Arrays to stack. All of them must have the same first dimension. Returns ------- stacked : 2-D array The array formed by stacking the given arrays. 
See Also -------- stack, hstack, vstack, concatenate Examples -------- >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) array([[1, 2], [2, 3], [3, 4]])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_shape_base_impl.py", + "ast_data": "FunctionDef name:column_stack arg:tup arguments arg Assign For Assign Call If Compare Assign Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_get_symmetric_qnnpack_qat_qconfig_mapping", + "source_code": "def _get_symmetric_qnnpack_qat_qconfig_mapping() -> QConfigMapping:\n default_qconfig = default_symmetric_qnnpack_qat_qconfig\n return _get_default_qconfig_mapping_with_default_qconfig(True, 'qnnpack', default_qconfig)", + "docstring": "Return a QConfigMapping that uses as the default QConfig.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py", + "ast_data": "FunctionDef name:_get_symmetric_qnnpack_qat_qconfig_mapping arguments Assign Return return:yes Call" + }, + { + "library": "django", + "name": "send_mass_mail", + "source_code": "def send_mass_mail(datatuple, fail_silently=False, auth_user=None, auth_password=None, connection=None):\n connection = connection or get_connection(username=auth_user, password=auth_password, fail_silently=fail_silently)\n messages = [EmailMessage(subject, message, sender, recipient, connection=connection) for subject, message, sender, recipient in datatuple]\n return connection.send_messages(messages)", + "docstring": "Given a datatuple of (subject, message, from_email, recipient_list), send each message to each recipient list. Return the number of emails sent. If from_email is None, use the DEFAULT_FROM_EMAIL setting. If auth_user and auth_password are set, use them to log in. If auth_user is None, use the EMAIL_HOST_USER setting. If auth_password is None, use the EMAIL_HOST_PASSWORD setting. Note: The API for this method is frozen. 
New code wanting to extend the functionality should use the EmailMessage class directly.", + "type": "function", + "file_path": "django\\django\\core\\mail\\__init__.py", + "ast_data": "FunctionDef name:send_mass_mail arg:datatuple arg:fail_silently arg:auth_user arg:auth_password arg:connection arguments arg arg arg arg arg Assign BoolOp Call Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "less", + "source_code": "@array_function_dispatch(_binary_op_dispatcher)\ndef less(x1, x2):\n return compare_chararrays(x1, x2, '<', True)", + "docstring": "Return (x1 >> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.less(x1, 'b') array([True, False, False])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:less arg:x1 arg:x2 arguments arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_in_patch", + "source_code": "def _in_patch(self, patch):\n return lambda xy: patch.contains(SimpleNamespace(x=xy[0], y=xy[1]))[0]", + "docstring": "Return a predicate function testing whether a point *xy* is contained in *patch*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:_in_patch arg:self arg:patch arguments arg arg Return return:yes arguments arg Call Call" + }, + { + "library": "matplotlib", + "name": "resize", + "source_code": "def resize(self, w, h):\n pass", + "docstring": "For GUI backends, resize the window (in physical pixels).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:resize arg:self arg:w arg:h arguments arg arg arg" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "def fit(self, y):\n y = column_or_1d(y, warn=True)\n self.classes_ = _unique(y)\n return self", + "docstring": "Fit label encoder. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. 
Fitted label encoder.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py", + "ast_data": "FunctionDef name:fit arg:self arg:y arguments arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "__len__", + "source_code": "def __len__(self):\n return len(self.estimators_)", + "docstring": "Return the number of estimators in the ensemble.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_set_last_step_outputs", + "source_code": "def _set_last_step_outputs(ctx, last_step_tensor_outputs):\n last_step_tensor_outputs_dict = nest.pack_sequence_as(ctx.last_step_outputs, last_step_tensor_outputs)\n for name, reduce_op in ctx._last_step_outputs_reduce_ops.items():\n output = last_step_tensor_outputs_dict[name]\n if reduce_op is None:\n last_step_tensor_outputs_dict[name] = values.PerReplica(output)\n else:\n last_step_tensor_outputs_dict[name] = output[0]\n ctx._set_last_step_outputs(last_step_tensor_outputs_dict)", + "docstring": "Sets the last step outputs on the given context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:_set_last_step_outputs arg:ctx arg:last_step_tensor_outputs arguments arg arg Assign Call For Call Assign If Compare Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "get_config", + "source_code": "def get_config(self):\n from tensorflow.python.feature_column.serialization import serialize_feature_column\n config = dict(zip(self._fields, self))\n config['categorical_column'] = serialize_feature_column(self.categorical_column)\n return config", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "suggest_name", + "source_code": "def suggest_name(self):\n if self.initial:\n return 'initial'\n raw_fragments = [op.migration_name_fragment for op in self.operations]\n fragments = [re.sub('\\\\W+', '_', name) for name in raw_fragments if name]\n if not fragments or len(fragments) != len(self.operations):\n return 'auto_%s' % get_migration_name_timestamp()\n name = fragments[0]\n for fragment in fragments[1:]:\n new_name = f'{name}_{fragment}'\n if len(new_name) > 52:\n name = f'{name}_and_more'\n break\n name = new_name\n return name", + "docstring": "Suggest a name for the operations this migration might represent. 
Names are not guaranteed to be unique, but put some effort into the fallback name to avoid VCS conflicts if possible.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\migration.py", + "ast_data": "FunctionDef name:suggest_name arg:self arguments arg If Return return:yes Assign Assign Call If BoolOp Compare Call Call Return return:yes Call Assign For Assign If Compare Call Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_make_alias", + "source_code": "def _make_alias(fn, name):\n\n def _fn(*args, **kwargs):\n return fn(*args, **kwargs)\n _fn.__name__ = name\n _fn.__module__ = inspect.currentframe().f_back.f_globals['__name__']\n return _fn", + "docstring": "This function defines an alias of another function and sets its __name__ argument. It also sets its __module__ argument to the module of the caller. Note that when naively doing , we have that , and .", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\__init__.py", + "ast_data": "FunctionDef name:_make_alias arg:fn arg:name arguments arg arg FunctionDef name:_fn arguments arg arg Return return:yes Call Assign Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_finalize_rasterization", + "source_code": "def _finalize_rasterization(draw):\n\n @wraps(draw)\n def draw_wrapper(artist, renderer, *args, **kwargs):\n result = draw(artist, renderer, *args, **kwargs)\n if renderer._rasterizing:\n renderer.stop_rasterizing()\n renderer._rasterizing = False\n return result\n return draw_wrapper", + "docstring": "Decorator for Artist.draw method. Needed on the outermost artist, i.e. Figure, to finish up if the render is still in rasterized mode.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:_finalize_rasterization arg:draw arguments arg FunctionDef name:draw_wrapper arg:artist arg:renderer arguments arg arg arg arg Assign Call If Call Assign Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "learn_cache_key", + "source_code": "def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):\n if key_prefix is None:\n key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX\n if cache_timeout is None:\n cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS\n cache_key = _generate_cache_header_key(key_prefix, request)\n if cache is None:\n cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]\n if response.has_header('Vary'):\n is_accept_language_redundant = settings.USE_I18N\n headerlist = []\n for header in cc_delim_re.split(response.headers['Vary']):\n header = header.upper().replace('-', '_')\n if header != 'ACCEPT_LANGUAGE' or not is_accept_language_redundant:\n headerlist.append('HTTP_' + header)\n headerlist.sort()\n cache.set(cache_key, headerlist, cache_timeout)\n return _generate_cache_key(request, request.method, headerlist, key_prefix)\n else:\n cache.set(cache_key, [], cache_timeout)\n return _generate_cache_key(request, request.method, [], key_prefix)", + "docstring": "Learn what headers to take into account for some request URL from the response object. Store those headers in a global URL registry so that later access to that URL will know what headers to take into account without building the response object itself. The headers are named in the Vary header of the response, but we want to prevent response generation. The list of headers to use for cache key generation is stored in the same cache as the pages themselves. 
If the cache ages some data out of the cache, this just means that we have to build the response once to get at the Vary header and so at the list of headers to use for the cache key.", + "type": "function", + "file_path": "django\\django\\utils\\cache.py", + "ast_data": "FunctionDef name:learn_cache_key arg:request arg:response arg:cache_timeout arg:key_prefix arg:cache arguments arg arg arg arg arg If Compare Assign If Compare Assign Assign Call If Compare Assign If Call Assign Assign For Call Assign Call Call If BoolOp Compare Call Call Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_GatherLayerBroadcaster", + "source_code": "class _GatherLayerBroadcaster(_LayerBroadcaster):\n\n def __init__(self, gather_index):\n gather_index = ops.convert_to_tensor(gather_index)\n if gather_index.dtype != dtypes.int64 and gather_index.dtype != dtypes.int32:\n raise ValueError('gather_index must be int64 or int32')\n self._gather_index = gather_index\n\n @property\n def gather_index(self):\n return self._gather_index\n\n def with_dtype(self, dtype):\n return _GatherLayerBroadcaster(math_ops.cast(self._gather_index, dtype))\n\n def with_dependencies(self, checks):\n new_gather_index = control_flow_ops.with_dependencies(checks, self._gather_index)\n return _GatherLayerBroadcaster(new_gather_index)", + "docstring": "Implements _LayerBroadcaster with an explicit gather_index. For example, suppose that the source shape is: [*],[*,*] And the target shape is: [*],[*,*],[*],[*,*] Then, this can be represented with a map: [0,1,2,0,1,2]", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "ClassDef name:_GatherLayerBroadcaster FunctionDef name:__init__ arg:self arg:gather_index arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Assign FunctionDef name:gather_index arg:self arguments arg Return return:yes FunctionDef name:with_dtype arg:self arg:dtype arguments arg arg Return return:yes Call Call FunctionDef name:with_dependencies arg:self arg:checks arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_common_prefix", + "source_code": "def _common_prefix(self, m):\n if not m:\n return ''\n s1 = min(m)\n s2 = max(m)\n for i, c in enumerate(s1):\n if c != s2[i]:\n return s1[:i]\n return s1", + "docstring": "Given a list of str, returns the longest common prefix. Args: m: (list of str) A list of strings. 
Returns: (str) The longest common prefix.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:_common_prefix arg:self arg:m arguments arg arg If Return return:yes Assign Call Assign Call For Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_distributed_dataset", + "source_code": "def get_distributed_dataset(dataset, input_workers, strategy, num_replicas_in_sync=None, input_context=None, options=None, build=True, replica_order=None):\n if tf2.enabled():\n return input_lib.DistributedDataset(input_workers, strategy, dataset, num_replicas_in_sync=num_replicas_in_sync, input_context=input_context, build=build, options=options, replica_order=replica_order)\n else:\n return input_lib_v1.DistributedDatasetV1(dataset, input_workers, strategy, num_replicas_in_sync=num_replicas_in_sync, input_context=input_context, options=options)", + "docstring": "Returns a distributed dataset from the given tf.data.Dataset instance. This is a common function that is used by all strategies to return a distributed dataset. The distributed dataset instance returned is different depending on if we are in a TF 1 or TF 2 context. The distributed dataset instances returned differ from each other in the APIs supported by each of them. Args: dataset: a tf.data.Dataset instance. input_workers: an InputWorkers object which specifies devices on which iterators should be created. strategy: a object, used to run all-reduce to handle last partial batch. num_replicas_in_sync: Optional integer. If this is not None, the value is used to decide how to rebatch datasets into smaller batches so that the total batch size for each step (across all workers and replicas) adds up to 's batch size. input_context: for sharding. Only pass this in for between graph multi-worker cases where there is only one . In these cases, we will shard based on the and in the . options: Default is None. used to control options on how this dataset is distributed. build: whether to build underlying datasets when a DistributedDataset is created. This is only useful for now. replica_order: the order of the replicas, which will be used to reorder the iterators to match the device order. Returns: A distributed dataset instance.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_util.py", + "ast_data": "FunctionDef name:get_distributed_dataset arg:dataset arg:input_workers arg:strategy arg:num_replicas_in_sync arg:input_context arg:options arg:build arg:replica_order arguments arg arg arg arg arg arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "embedding_lookup", + "source_code": "def embedding_lookup(self, features: Any, weights: Optional[Any]=None) -> Tuple[Any, Dict[str, PartitionedCsrFormatTensor]]:\n if not self._built:\n self._maybe_build()\n context = EmbeddingPipeliningContext(_PIPELINE_MODE_FORWARD, self._pipelining)\n context.Enter()\n partitioned_tensors = self.enqueue(features, weights)\n context.Exit()\n result = self.dequeue(partitioned_tensors)\n return result", + "docstring": "Perform embedding lookup on the input feature. Args: features: A nested structure of s, s or s, with the same structure as . Inputs will be downcast to . Only one type out of or is supported per call. 
weights: If not , a nested structure of s, s or s, matching the above, except that the tensors should be of float type (and they will be downcast to ). For s we assume the are the same for the parallel entries from and similarly for s we assume the row_splits are the same. Raises: ValueError: If the input feature is not one of the Tensor, SparseTensor or RaggedTensor type. TypeError: If the type of any sequence in does not match corresponding sequence in . Similarly for , if not . Returns: packed_activations: Embedding lookup results packed as the same sequence of the input feature. packed_output: A dict of PartitionedCsrFormatTensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:embedding_lookup arg:self arg:features arg:weights arguments arg arg arg If Call Assign Call Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_scale_mm_configs", + "source_code": "def _scale_mm_configs(self, m: int, n: int, k: int, configs: list[BaseConfig], scale: float, has_int8_tensor: bool, exclude: Callable[[int, int, int], bool]) -> list[BaseConfig]:\n from .runtime.runtime_utils import next_power_of_2\n min_block_size = 16\n min_block_size_k = 32 if has_int8_tensor else 16\n m = max(next_power_of_2(V.graph.sizevars.size_hint(m, fallback=config.unbacked_symint_fallback)), min_block_size)\n n = max(next_power_of_2(V.graph.sizevars.size_hint(n, fallback=config.unbacked_symint_fallback)), min_block_size)\n k = max(next_power_of_2(V.graph.sizevars.size_hint(k, fallback=config.unbacked_symint_fallback)), min_block_size_k)\n scaled_configs = []\n for c in configs:\n scaled_config = dataclasses.replace(c, block_m=max(min(int(c.block_m * scale), m), min_block_size), block_n=max(min(int(c.block_n * scale), n), min_block_size), block_k=max(min(int(c.block_k * scale), k), min_block_size_k))\n if not exclude(scaled_config.block_m, scaled_config.block_n, scaled_config.block_k):\n scaled_configs.append(scaled_config)\n return scaled_configs", + "docstring": "Scales and filters matrix multiplication configs based on input size.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\template_heuristics.py", + "ast_data": "FunctionDef name:_scale_mm_configs arg:self arg:m arg:n arg:k arg:configs arg:scale arg:has_int8_tensor arg:exclude arguments arg arg arg arg arg arg arg arg Assign Assign Assign Call Call Call Assign Call Call Call Assign Call Call Call Assign For Assign Call Call Call Call Call Call Call Call Call Call If Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "mark_as_unsaveable", + "source_code": "def mark_as_unsaveable(self, error_message):\n self._saveable = False\n if isinstance(error_message, str):\n error_message = [error_message]\n self._saving_errors.update(error_message)", + "docstring": "Marks this FuncGraph as unsaveable. Any attempts to export this FuncGraph will raise an error with the specified message. 
Args: error_message: List or string containing the error message to be raised when saving this FuncGraph to SavedModel.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", + "ast_data": "FunctionDef name:mark_as_unsaveable arg:self arg:error_message arguments arg arg Assign If Call Assign Call" + }, + { + "library": "scikit-learn", + "name": "_n_features_out", + "source_code": "@property\ndef _n_features_out(self):\n return self.components_.shape[0]", + "docstring": "Number of transformed output features.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_base.py", + "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_on_read_restore_ops", + "source_code": "def get_on_read_restore_ops(var, tensor, aggregation):\n if aggregation == vs.VariableAggregation.SUM:\n strategy = var.distribute_strategy\n tensor = math_ops.cast(tensor / strategy.num_replicas_in_sync, var.dtype)\n return control_flow_ops.group(tuple((assign_on_device(v.device, v, tensor) for v in var.values)))", + "docstring": "Return restore ops for ON_READ variables.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py", + "ast_data": "FunctionDef name:get_on_read_restore_ops arg:var arg:tensor arg:aggregation arguments arg arg arg If Compare Assign Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "device_memory_used", + "source_code": "def device_memory_used(device: Optional[Union[Device, int]]=None) -> int:\n if not torch.version.hip:\n handle = _get_pynvml_handler()\n device = _get_nvml_device_index(device)\n handle = pynvml.nvmlDeviceGetHandleByIndex(device)\n return pynvml.nvmlDeviceGetMemoryInfo(handle).used\n else:\n return _get_amdsmi_device_memory_used(device)", + "docstring": "Return used global (device) memory in bytes as given by or . Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default).", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:device_memory_used arg:device arguments arg If Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "all_reduce_sum_gradients", + "source_code": "def all_reduce_sum_gradients(grads_and_vars):\n grads_and_vars = list(grads_and_vars)\n filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)\n if filtered_grads_and_vars:\n if strategy_supports_no_merge_call():\n grads = [pair[0] for pair in filtered_grads_and_vars]\n reduced = distribute_lib.get_strategy().extended._replica_ctx_all_reduce(ds_reduce_util.ReduceOp.SUM, grads)\n else:\n reduced = distribute_lib.get_replica_context().merge_call(_all_reduce_sum_fn, args=(filtered_grads_and_vars,))\n else:\n reduced = []\n reduced_with_nones = []\n reduced_pos = 0\n for g, v in grads_and_vars:\n if g is None:\n reduced_with_nones.append((None, v))\n else:\n reduced_with_nones.append((reduced[reduced_pos], v))\n reduced_pos += 1\n assert reduced_pos == len(reduced), 'Failed to add all gradients'\n return reduced_with_nones", + "docstring": "Returns all-reduced gradients aggregated via summation. Args: grads_and_vars: List of (gradient, variable) pairs. 
Returns: List of (gradient, variable) pairs where gradients have been all-reduced.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\utils.py", + "ast_data": "FunctionDef name:all_reduce_sum_gradients arg:grads_and_vars arguments arg Assign Call Assign Call If If Call Assign Assign Call Call Assign Call Call Assign Assign Assign For If Compare Call Call Compare Call Return return:yes" + }, + { + "library": "authlib", + "name": "InvalidSoftwareStatementError", + "source_code": "class InvalidSoftwareStatementError(OAuth2Error):\n error = 'invalid_software_statement'", + "docstring": "The software statement presented is invalid.", + "type": "class", + "file_path": "authlib\\authlib\\oauth2\\rfc7591\\errors.py", + "ast_data": "ClassDef name:InvalidSoftwareStatementError Assign" + }, + { + "library": "scipy", + "name": "WayburnSeader02", + "source_code": "class WayburnSeader02(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N))\n self.custom_bounds = ([-1, 2], [-1, 2])\n self.global_optimum = [[0.2, 1.0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n u = (1.613 - 4 * (x[0] - 0.3125) ** 2 - 4 * (x[1] - 1.625) ** 2) ** 2\n v = (x[1] - 1) ** 2\n return u + v", + "docstring": "Wayburn and Seader 2 objective function. This class defines the Wayburn and Seader 2 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{WayburnSeader02}}(x) = \\left[ 1.613 - 4(x_1 - 0.3125)^2 - 4(x_2 - 1.625)^2 \\right]^2 + (x_2 - 1)^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_W.py", + "ast_data": "ClassDef name:WayburnSeader02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Return return:yes" + }, + { + "library": "numpy", + "name": "chebcompanion", + "source_code": "def chebcompanion(c):\n [c] = pu.as_series([c])\n if len(c) < 2:\n raise ValueError('Series must have maximum degree of at least 1.')\n if len(c) == 2:\n return np.array([[-c[0] / c[1]]])\n n = len(c) - 1\n mat = np.zeros((n, n), dtype=c.dtype)\n scl = np.array([1.0] + [np.sqrt(0.5)] * (n - 1))\n top = mat.reshape(-1)[1::n + 1]\n bot = mat.reshape(-1)[n::n + 1]\n top[0] = np.sqrt(0.5)\n top[1:] = 1 / 2\n bot[...] = top\n mat[:, -1] -= c[:-1] / c[-1] * (scl / scl[-1]) * 0.5\n return mat", + "docstring": "Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is symmetric when is a Chebyshev basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if is used to obtain them. Parameters ---------- c : array_like 1-D array of Chebyshev series coefficients ordered from low to high degree. 
Returns ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg).", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", + "ast_data": "FunctionDef name:chebcompanion arg:c arguments arg Assign Call If Compare Call Raise Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_gradients", + "source_code": "def get_gradients(self, loss, params):\n params = nest.flatten(params)\n with backend.get_graph().as_default(), backend.name_scope(self._name + '/gradients'):\n grads = gradients.gradients(loss, params)\n for grad, param in zip(grads, params):\n if grad is None:\n raise ValueError('Variable {} has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.'.format(param))\n return grads", + "docstring": "Returns gradients of with respect to . Should be used only in legacy v1 graph mode. Args: loss: Loss tensor. params: List of variables. Returns: List of gradient tensors. Raises: ValueError: In case any gradient cannot be computed (e.g. if gradient function not implemented).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:get_gradients arg:self arg:loss arg:params arguments arg arg arg Assign Call With Call Call Call Assign Call For Call If Compare Raise Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_CustomReducer", + "source_code": "class _CustomReducer:\n\n def __init__(self, init_value, reduce_fn):\n self.init_value = init_value\n self.reduce_fn = reduce_fn", + "docstring": "Custom reducer class that can be used to specify a custom operation that reduces losses of multiple microbatches into one value. Example: >>> # xdoctest: +SKIP >>> sum_reducer = _CustomReducer( >>> torch.tensor(0.0), >>> lambda a, b: a + b >>> )", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\pipelining\\microbatch.py", + "ast_data": "ClassDef name:_CustomReducer FunctionDef name:__init__ arg:self arg:init_value arg:reduce_fn arguments arg arg arg Assign Assign" + }, + { + "library": "matplotlib", + "name": "_clear", + "source_code": "def _clear(self):\n self._position = None", + "docstring": "Clear things directly related to the spine. In this way it is possible to avoid clearing the Axis as well when calling from library code where it is known that the Axis is cleared separately.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\spines.py", + "ast_data": "FunctionDef name:_clear arg:self arguments arg Assign" + }, + { + "library": "tensorflow", + "name": "_show_tag_sets", + "source_code": "def _show_tag_sets(saved_model_dir):\n tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)\n print('The given SavedModel contains the following tag-sets:')\n for tag_set in sorted(tag_sets):\n print('%r' % ', '.join(sorted(tag_set)))", + "docstring": "Prints the tag-sets stored in SavedModel directory. Prints all the tag-sets for MetaGraphs stored in SavedModel directory. 
Args: saved_model_dir: Directory containing the SavedModel to inspect.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py", + "ast_data": "FunctionDef name:_show_tag_sets arg:saved_model_dir arguments arg Assign Call Call For Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "LinearScale", + "source_code": "class LinearScale(ScaleBase):\n name = 'linear'\n\n def __init__(self, axis):\n pass\n\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(AutoLocator())\n axis.set_major_formatter(ScalarFormatter())\n axis.set_minor_formatter(NullFormatter())\n if axis.axis_name == 'x' and mpl.rcParams['xtick.minor.visible'] or (axis.axis_name == 'y' and mpl.rcParams['ytick.minor.visible']):\n axis.set_minor_locator(AutoMinorLocator())\n else:\n axis.set_minor_locator(NullLocator())\n\n def get_transform(self):\n return IdentityTransform()", + "docstring": "The default linear scale.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "ClassDef name:LinearScale Assign FunctionDef name:__init__ arg:self arg:axis arguments arg arg FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Call Call Call Call Call Call If BoolOp BoolOp Compare BoolOp Compare Call Call Call Call FunctionDef name:get_transform arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "maybe_shuffle_batch", + "source_code": "@tf_export(v1=['train.maybe_shuffle_batch'])\n@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.filter(...).shuffle(min_after_dequeue).batch(batch_size)`.')\ndef maybe_shuffle_batch(tensors, batch_size, capacity, min_after_dequeue, keep_input, num_threads=1, seed=None, enqueue_many=False, shapes=None, allow_smaller_final_batch=False, shared_name=None, name=None):\n return _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue, keep_input, num_threads=num_threads, seed=seed, enqueue_many=enqueue_many, shapes=shapes, allow_smaller_final_batch=allow_smaller_final_batch, shared_name=shared_name, name=name)", + "docstring": "Creates batches by randomly shuffling conditionally-enqueued tensors. See docstring in for more details. Args: tensors: The list or dictionary of tensors to enqueue. batch_size: The new batch size pulled from the queue. capacity: An integer. The maximum number of elements in the queue. min_after_dequeue: Minimum number elements in the queue after a dequeue, used to ensure a level of mixing of elements. keep_input: A Tensor. This tensor controls whether the input is added to the queue or not. If it is a scalar and evaluates , then are all added to the queue. If it is a vector and is , then each example is added to the queue only if the corresponding value in is . This tensor essentially acts as a filtering mechanism. num_threads: The number of threads enqueuing . seed: Seed for the random shuffling within the queue. enqueue_many: Whether each tensor in is a single example. shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for . allow_smaller_final_batch: (Optional) Boolean. If , allow the final batch to be smaller if there are insufficient items left in the queue. shared_name: (Optional) If set, this queue will be shared under the given name across multiple sessions. name: (Optional) A name for the operations. Returns: A list or dictionary of tensors with the types as . 
Raises: ValueError: If the are not specified, and cannot be inferred from the elements of . @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the API to ingest data under eager execution. @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:maybe_shuffle_batch arg:tensors arg:batch_size arg:capacity arg:min_after_dequeue arg:keep_input arg:num_threads arg:seed arg:enqueue_many arg:shapes arg:allow_smaller_final_batch arg:shared_name arg:name arguments arg arg arg arg arg arg arg arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_build_ragged_tensors", + "source_code": "def _build_ragged_tensors(serialized_shape, ragged_values, ragged_row_splits, ragged_inner_splits=None):\n if ragged_inner_splits is not None:\n ragged_values = [ragged_tensor.RaggedTensor.from_row_splits(val, split, validate=False) for val, split in zip(ragged_values, ragged_inner_splits)]\n if serialized_shape.ndims == 0:\n return ragged_values\n else:\n return [ragged_tensor.RaggedTensor.from_row_splits(val, split, validate=False) for val, split in zip(ragged_values, ragged_row_splits)]", + "docstring": "Builds RaggedTensors from the outputs of a parse op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py", + "ast_data": "FunctionDef name:_build_ragged_tensors arg:serialized_shape arg:ragged_values arg:ragged_row_splits arg:ragged_inner_splits arguments arg arg arg arg If Compare Assign Call Call If Compare Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "set_configuration_from_input_tensors", + "source_code": "def set_configuration_from_input_tensors(self, input_tensors):\n if len(input_tensors) != self.number_of_tuple_elements:\n raise ValueError(f'input_tensors is {str(input_tensors)}, but should be a list of {self.number_of_tuple_elements} Tensors')\n self.set_tuple_shapes([t.shape for t in input_tensors])\n self.set_tuple_types([t.dtype for t in input_tensors])", + "docstring": "Sets the shapes and types of the queue tuple elements. input_tensors is a list of Tensors whose types and shapes are used to set the queue configuration. Args: input_tensors: list of Tensors of the same types and shapes as the desired queue Tuple. Raises: ValueError: if input_tensors is not a list of length self.number_of_tuple_elements", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", + "ast_data": "FunctionDef name:set_configuration_from_input_tensors arg:self arg:input_tensors arguments arg arg If Compare Call Raise Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "fill", + "source_code": "def fill(self, *args):\n if len(args):\n _fillcolor = args[0]\n else:\n _fillcolor = self._fillcolor\n return self._hatch or (_fillcolor is not None and (len(_fillcolor) <= 3 or _fillcolor[3] != 0.0))", + "docstring": "Predicate: does the path need to be filled? 
An optional argument can be used to specify an alternative _fillcolor, as needed by RendererPdf.draw_markers.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:fill arg:self arguments arg arg If Call Assign Assign Return return:yes BoolOp BoolOp Compare BoolOp Compare Call Compare" + }, + { + "library": "tensorflow", + "name": "SessionRunContext", + "source_code": "@tf_export(v1=['train.SessionRunContext'])\nclass SessionRunContext:\n\n def __init__(self, original_args, session):\n self._original_args = original_args\n self._session = session\n self._stop_requested = False\n\n @property\n def original_args(self):\n return self._original_args\n\n @property\n def session(self):\n return self._session\n\n @property\n def stop_requested(self):\n return self._stop_requested\n\n def request_stop(self):\n self._stop_requested = True", + "docstring": "Provides information about the call being made. Provides information about original request to function. SessionRunHook objects can stop the loop by calling of . In the future we may use this object to add more information about run without changing the Hook API.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py", + "ast_data": "ClassDef name:SessionRunContext FunctionDef name:__init__ arg:self arg:original_args arg:session arguments arg arg arg Assign Assign Assign FunctionDef name:original_args arg:self arguments arg Return return:yes FunctionDef name:session arg:self arguments arg Return return:yes FunctionDef name:stop_requested arg:self arguments arg Return return:yes FunctionDef name:request_stop arg:self arguments arg Assign Call" + }, + { + "library": "tensorflow", + "name": "match_filenames_once", + "source_code": "@tf_export('io.match_filenames_once', v1=['io.match_filenames_once', 'train.match_filenames_once'])\n@deprecation.deprecated_endpoints('train.match_filenames_once')\ndef match_filenames_once(pattern, name=None):\n with ops.name_scope(name, 'matching_filenames', [pattern]) as name:\n return variable_v1.VariableV1(name=name, initial_value=io_ops.matching_files(pattern), trainable=False, validate_shape=False, collections=[ops.GraphKeys.LOCAL_VARIABLES])", + "docstring": "Save the list of files matching pattern, so it is only computed once. NOTE: The order of the files returned is deterministic. Args: pattern: A file pattern (glob), or 1D tensor of file patterns. name: A name for the operations (optional). Returns: A variable that is initialized to the list of files matching the pattern(s).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:match_filenames_once arg:pattern arg:name arguments arg arg With Call Return return:yes Call Call Call Call" + }, + { + "library": "pytorch", + "name": "byte", + "source_code": "def byte(self):\n return self._to(torch.uint8)", + "docstring": "Casts this storage to byte type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:byte arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "chebweight", + "source_code": "def chebweight(x):\n w = 1.0 / (np.sqrt(1.0 + x) * np.sqrt(1.0 - x))\n return w", + "docstring": "The weight function of the Chebyshev polynomials. The weight function is :math: and the interval of integration is :math:. 
The Chebyshev polynomials are orthogonal, but not normalized, with respect to this weight function. Parameters ---------- x : array_like Values at which the weight function will be computed. Returns ------- w : ndarray The weight function at .", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", + "ast_data": "FunctionDef name:chebweight arg:x arguments arg Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_picker", + "source_code": "def get_picker(self):\n return self._picker", + "docstring": "Return the picking behavior of the artist. The possible values are described in . See Also -------- .Artist.set_picker, .Artist.pickable, .Artist.pick", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:get_picker arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_set_covariance", + "source_code": "def _set_covariance(self, covariance):\n covariance = check_array(covariance)\n self.covariance_ = covariance\n if self.store_precision:\n self.precision_ = linalg.pinvh(covariance, check_finite=False)\n else:\n self.precision_ = None", + "docstring": "Saves the covariance and precision estimates Storage is done accordingly to . Precision stored only if invertible. Parameters ---------- covariance : array-like of shape (n_features, n_features) Estimated covariance matrix to be stored, and from which precision is computed.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py", + "ast_data": "FunctionDef name:_set_covariance arg:self arg:covariance arguments arg arg Assign Call Assign If Assign Call Assign" + }, + { + "library": "pandas", + "name": "_constructor", + "source_code": "@cache_readonly\ndef _constructor(self) -> type[Index]:\n return Index", + "docstring": "return the class to use for construction", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\range.py", + "ast_data": "FunctionDef name:_constructor arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_minorticklabels", + "source_code": "def get_minorticklabels(self):\n self._update_ticks()\n ticks = self.get_minor_ticks()\n labels1 = [tick.label1 for tick in ticks if tick.label1.get_visible()]\n labels2 = [tick.label2 for tick in ticks if tick.label2.get_visible()]\n return labels1 + labels2", + "docstring": "Return this Axis' minor tick labels, as a list of .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_minorticklabels arg:self arguments arg Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_device_capability", + "source_code": "def get_device_capability(device: Optional[_device_t]=None) -> tuple[int, int]:\n prop = get_device_properties(device)\n return (prop.major, prop.minor)", + "docstring": "Get the cuda capability of a device. Args: device (torch.device or int or str, optional): device for which to return the device capability. This function is a no-op if this argument is a negative integer. It uses the current device, given by :func:, if :attr: is `` (default). 
Returns: tuple(int, int): the major and minor cuda capability of the device", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:get_device_capability arg:device arguments arg Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "manhattan_distances", + "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'Y': ['array-like', 'sparse matrix', None]}, prefer_skip_nested_validation=True)\ndef manhattan_distances(X, Y=None):\n X, Y = check_pairwise_arrays(X, Y)\n if issparse(X) or issparse(Y):\n X = csr_matrix(X, copy=False)\n Y = csr_matrix(Y, copy=False)\n X.sum_duplicates()\n Y.sum_duplicates()\n D = np.zeros((X.shape[0], Y.shape[0]))\n _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, D)\n return D\n return distance.cdist(X, Y, 'cityblock')", + "docstring": "Compute the L1 distances between the vectors in X and Y. Read more in the :ref:. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples_X, n_features) An array where each row is a sample and each column is a feature. Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None An array where each row is a sample and each column is a feature. If , method uses . Returns ------- distances : ndarray of shape (n_samples_X, n_samples_Y) Pairwise L1 distances. Notes ----- When X and/or Y are CSR sparse matrices and they are not already in canonical format, this function modifies them in-place to make them canonical. Examples -------- >>> from sklearn.metrics.pairwise import manhattan_distances >>> manhattan_distances([[3]], [[3]]) array([[0.]]) >>> manhattan_distances([[3]], [[2]]) array([[1.]]) >>> manhattan_distances([[2]], [[3]]) array([[1.]]) >>> manhattan_distances([[1, 2], [3, 4]], [[1, 2], [0, 3]]) array([[0., 2.], [4., 4.]])", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py", + "ast_data": "FunctionDef name:manhattan_distances arg:X arg:Y arguments arg arg Assign Call If BoolOp Call Call Assign Call Assign Call Call Call Assign Call Call Return return:yes Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "dot", + "source_code": "def dot(self, V):\n assert V.shape == (self.m,)\n return np.bincount(self.rows, weights=self.vals * V[self.cols], minlength=self.m)", + "docstring": "Dot product of self by a vector *V* in sparse-dense to dense format *V* dense vector of shape (self.m,).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:dot arg:self arg:V arguments arg arg Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "Graph", + "source_code": "class Graph(collections.namedtuple('Graph', ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):\n\n def __repr__(self):\n return self.as_dot()\n\n def as_dot(self):\n result = 'digraph CFG {\\n'\n for node in self.index.values():\n result += ' %s [label=\"%s\"];\\n' % (id(node), node)\n for node in self.index.values():\n for next_ in node.next:\n result += ' %s -> %s;\\n' % (id(node), id(next_))\n result += '}'\n return result", + "docstring": "A Control Flow Graph. The CFG maintains an index to allow looking up a CFG node by the AST node to which it is associated. The index can also be enumerated in top-down, depth first order. Walking the graph in forward or reverse order is supported by double parent-child links. 
Note: the error nodes are not wired to their corresponding finally guards, because these are shared, and wiring them would create a reverse path from normal control flow into the error nodes, which we want to avoid. The graph also maintains edges corresponding to higher level statements like for-else loops. A node is considered successor of a statement if there is an edge from a node that is lexically a child of that statement to a node that is not. Statement predecessors are analogously defined. Attributes: entry: Node, the entry node exit: FrozenSet[Node, ...], the exit nodes error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised error (errors propagated from function calls are not accounted) index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG node stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes to their predecessor CFG nodes stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes to their successor CFG nodes", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "ClassDef name:Graph Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:as_dot arg:self arguments arg Assign For Call Call For Call For Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_scalar_event", + "source_code": "def is_scalar_event(self, name='is_scalar_event'):\n with self._name_scope(name):\n return ops.convert_to_tensor(self._is_scalar_helper(self.event_shape, self.event_shape_tensor), name='is_scalar_event')", + "docstring": "Indicates that . Args: name: Python prepended to names of ops created by this function. Returns: is_scalar_event: scalar .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", + "ast_data": "FunctionDef name:is_scalar_event arg:self arg:name arguments arg arg With Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "register_state_dict_pre_hook", + "source_code": "def register_state_dict_pre_hook(self, hook):\n handle = RemovableHandle(self._state_dict_pre_hooks)\n self._state_dict_pre_hooks[handle.id] = hook\n return handle", + "docstring": "Register a pre-hook for the :meth: method. It should have the following signature:: hook(module, prefix, keep_vars) -> None The registered hooks can be used to perform pre-processing before the `` call is made.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:register_state_dict_pre_hook arg:self arg:hook arguments arg arg Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "size", + "source_code": "def size(self, dim: Optional[int]=None) -> Union[int, torch.SymInt]:\n return self.as_fake().size(dim)", + "docstring": "Returns the size of the tensor (if dim is None) or the size at the dimension dim. 
The returned size may be a SymInt.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\comptime.py", + "ast_data": "FunctionDef name:size arg:self arg:dim arguments arg arg Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "_map_prop_with_hue", + "source_code": "def _map_prop_with_hue(self, name, value, fallback, plot_kws):\n if value is default:\n value = plot_kws.pop(name, fallback)\n if 'hue' in self.variables:\n levels = self._hue_map.levels\n if isinstance(value, list):\n mapping = {k: v for k, v in zip(levels, value)}\n else:\n mapping = {k: value for k in levels}\n else:\n mapping = {None: value}\n return mapping", + "docstring": "Support pointplot behavior of modifying the marker/linestyle with hue.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:_map_prop_with_hue arg:self arg:name arg:value arg:fallback arg:plot_kws arguments arg arg arg arg arg If Compare Assign Call If Compare Assign If Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "FSDPCommContext", + "source_code": "class FSDPCommContext:\n\n def lazy_init(self, device: torch.device):\n self.device_handle = _get_device_handle(device.type)\n high_priority = -1\n self.all_gather_copy_in_stream = self.device_handle.Stream(priority=high_priority)\n self.all_gather_stream = self.device_handle.Stream(priority=high_priority)\n self.reduce_scatter_stream = self.device_handle.Stream(priority=high_priority)\n self.all_reduce_stream = self.device_handle.Stream()\n self.all_gather_state: Optional[AllGatherState] = None\n self.reduce_scatter_state: Optional[ReduceScatterState] = None\n self.post_forward_order: list[FSDPParamGroup] = []\n\n def get_all_gather_streams(self, async_op: bool, training_state: TrainingState) -> tuple[torch.Stream, torch.Stream]:\n if not async_op and training_state in (TrainingState.FORWARD, TrainingState.PRE_BACKWARD):\n return (self.all_gather_copy_in_stream, self.all_gather_stream)\n current_stream = self.device_handle.current_stream()\n return (current_stream, current_stream)", + "docstring": "This has the communication state shared across FSDP states/parameter groups.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_param_group.py", + "ast_data": "ClassDef name:FSDPCommContext FunctionDef name:lazy_init arg:self arg:device arguments arg arg Assign Call Assign Assign Call Assign Call Assign Call Assign Call FunctionDef name:get_all_gather_streams arg:self arg:async_op arg:training_state arguments arg arg arg If BoolOp Compare Return return:yes Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "output", + "source_code": "def output(self, u, t, x0=None):\n return dlsim(self, u, t, x0=x0)", + "docstring": "Return the response of the discrete-time system to input . 
See for details.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:output arg:self arg:u arg:t arg:x0 arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "process_files", + "source_code": "def process_files(self, file_list):\n file_groups = {}\n for translatable in file_list:\n file_group = file_groups.setdefault(translatable.locale_dir, [])\n file_group.append(translatable)\n for locale_dir, files in file_groups.items():\n self.process_locale_dir(locale_dir, files)", + "docstring": "Group translatable files by locale directory and run pot file build process for each group.", + "type": "method", + "file_path": "django\\django\\core\\management\\commands\\makemessages.py", + "ast_data": "FunctionDef name:process_files arg:self arg:file_list arguments arg arg Assign For Assign Call Call For Call Call" + }, + { + "library": "django", + "name": "_check_list_max_show_all", + "source_code": "def _check_list_max_show_all(self, obj):\n if not isinstance(obj.list_max_show_all, int):\n return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119')\n else:\n return []", + "docstring": "Check that list_max_show_all is an integer.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_list_max_show_all arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no" + }, + { + "library": "pytorch", + "name": "RemoveNoneInputStep", + "source_code": "class RemoveNoneInputStep(InputAdaptStep):\n\n def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n assert not model_kwargs\n return (tuple((arg for arg in model_args if arg is not None)), {})", + "docstring": "Remove from arguments. 
This adapt step assumes `None` inside nested collections.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", + "ast_data": "ClassDef name:RemoveNoneInputStep FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes Call Compare" + }, + { + "library": "cherrypy", + "name": "patched_path", + "source_code": "def patched_path(path):\n if not path.endswith('/'):\n path += '/'\n if path.startswith('/RPC2/'):\n path = path[5:]\n return path", + "docstring": "Return 'path', doctored for RPC.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\xmlrpcutil.py", + "ast_data": "FunctionDef name:patched_path arg:path arguments arg If Call If Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "svd_lowrank", + "source_code": "def svd_lowrank(A: Tensor, q: Optional[int]=6, niter: Optional[int]=2, M: Optional[Tensor]=None) -> tuple[Tensor, Tensor, Tensor]:\n if not torch.jit.is_scripting():\n tensor_ops = (A, M)\n if not set(map(type, tensor_ops)).issubset((torch.Tensor, type(None))) and has_torch_function(tensor_ops):\n return handle_torch_function(svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M)\n return _svd_lowrank(A, q=q, niter=niter, M=M)", + "docstring": "Return the singular value decomposition `AA \\approx U \\operatorname{diag}(S) V^{\\text{H}}MA - MAQk `_).", + "type": "function", + "file_path": "pytorch\\torch\\_lowrank.py", + "ast_data": "FunctionDef name:svd_lowrank arg:A arg:q arg:niter arg:M arguments arg arg arg arg If Call Assign If BoolOp Call Call Call Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "polymul", + "source_code": "def polymul(c1, c2):\n [c1, c2] = pu.as_series([c1, c2])\n ret = np.convolve(c1, c2)\n return pu.trimseq(ret)", + "docstring": "Multiply one polynomial by another. Returns the product of two polynomials * . The arguments are sequences of coefficients, from lowest order term to highest, e.g., [1,2,3] represents the polynomial `` Parameters ---------- c1, c2 : array_like 1-D arrays of coefficients representing a polynomial, relative to the \"standard\" basis, and ordered from lowest order term to highest. Returns ------- out : ndarray Of the coefficients of their product. See Also -------- polyadd, polysub, polymulx, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1, 2, 3) >>> c2 = (3, 2, 1) >>> P.polymul(c1, c2) array([ 3., 8., 14., 8., 3.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polynomial.py", + "ast_data": "FunctionDef name:polymul arg:c1 arg:c2 arguments arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_should_pack", + "source_code": "def _should_pack(arg):\n return isinstance(arg, list)", + "docstring": "Determines whether the caller needs to pack the argument in a tuple. If user-defined function returns a list of tensors, and and would conspire to attempt to stack those tensors into a single tensor because the tf.data version of does not recurse into lists. Since it is more likely that the list arose from returning the result of an operation (such as ) that returns a list of not-necessarily-stackable tensors, we treat the returned value as a instead. A user wishing to pack the return value into a single tensor can use an explicit before returning. 
Args: arg: argument to check Returns: Indication of whether the caller needs to pack the argument in a tuple.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\structured_function.py", + "ast_data": "FunctionDef name:_should_pack arg:arg arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "bounds", + "source_code": "@property\ndef bounds(self):\n if self.k1.bounds.size == 0:\n return self.k2.bounds\n if self.k2.bounds.size == 0:\n return self.k1.bounds\n return np.vstack((self.k1.bounds, self.k2.bounds))", + "docstring": "Returns the log-transformed bounds on the theta. Returns ------- bounds : ndarray of shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:bounds arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "block_stmt", + "source_code": "def block_stmt(stmt: str, indent: int=0) -> str:\n block_size = 100\n loop_count = number // block_size\n if loop_count == 1:\n loop_count = 0\n remainder = number - block_size * loop_count\n blocked_stmt = ''\n if loop_count:\n unrolled_stmts = textwrap.indent('\\n'.join([stmt] * block_size), ' ' * 4)\n blocked_stmt += f'for _ in range({loop_count}):\\n{unrolled_stmts}\\n'\n if remainder:\n blocked_stmt += '\\n'.join([stmt] * remainder)\n return textwrap.indent(blocked_stmt, ' ' * indent)", + "docstring": "Partially unroll benchmark loop. The naive template looks something like: \"for _ in range({number}): {stmt}\" However a loop in Python is surprisingly expensive, and significantly increases the number of background Python instructions. 
So instead we partially unroll the loops, with a block size of 100 chosen to keep the instruction overhead from low while also not ballooning the size of the generated file.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py", + "ast_data": "FunctionDef name:block_stmt arg:stmt arg:indent arguments arg arg Assign Assign If Compare Assign Assign Assign If Assign Call Call If Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "merge_loops", + "source_code": "def merge_loops(self) -> LoopBody:\n old_body = self\n old_sizes = self.sizes\n old_iter_vars, old_reduce_vars = old_body.vars\n old_iter_sizes, old_reduce_sizes = old_sizes\n index_exprs = [*old_body.indexing_exprs.values()]\n iter_sizes, iter_reindex, _ = V.graph.sizevars._simplify_loops(old_iter_vars, old_iter_sizes, index_prevent_reordering(index_exprs, old_iter_vars, old_iter_sizes))\n reduce_sizes, reduce_reindex, _ = V.graph.sizevars._simplify_loops(old_reduce_vars, old_reduce_sizes, index_prevent_reordering(index_exprs, old_reduce_vars, old_reduce_sizes))\n (iter_vars, reduce_vars), var_ranges = dependencies.index_vars_no_squeeze(iter_sizes, reduce_sizes, prefix='t')\n new_body = LoopBody(old_body, [iter_reindex(iter_vars), reduce_reindex(reduce_vars)], var_ranges, iter_vars, reduce_vars)\n (iter_vars2, reduce_vars2), var_ranges2 = dependencies.index_vars_no_squeeze(iter_sizes, reduce_sizes, prefix='p')\n new_body2 = LoopBody(new_body, (iter_vars2, reduce_vars2), var_ranges2, iter_vars2, reduce_vars2)\n return new_body2", + "docstring": "Merge both iteration and reduction loops and return a new LoopBody.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\loop_body.py", + "ast_data": "FunctionDef name:merge_loops arg:self arguments arg Assign Assign Assign Assign Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_bin_numbers", + "source_code": "def _bin_numbers(sample, nbin, edges, dedges):\n Dlen, Ndim = sample.shape\n sampBin = [np.digitize(sample[:, i], edges[i]) for i in range(Ndim)]\n for i in range(Ndim):\n dedges_min = dedges[i].min()\n if dedges_min == 0:\n raise ValueError('The smallest edge difference is numerically 0.')\n decimal = int(-np.log10(dedges_min)) + 6\n on_edge = np.where((sample[:, i] >= edges[i][-1]) & (np.around(sample[:, i], decimal) == np.around(edges[i][-1], decimal)))[0]\n sampBin[i][on_edge] -= 1\n binnumbers = np.ravel_multi_index(sampBin, nbin)\n return binnumbers", + "docstring": "Compute the bin number each sample falls into, in each dimension", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_binned_statistic.py", + "ast_data": "FunctionDef name:_bin_numbers arg:sample arg:nbin arg:edges arg:dedges arguments arg arg arg arg Assign Assign Call Call For Call Assign Call If Compare Raise Call Assign Call Call Assign Call Compare Compare Call Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "remove", + "source_code": "def remove(self):\n if self._remove_method is not None:\n self._remove_method(self)\n self.stale_callback = None\n _ax_flag = False\n if hasattr(self, 'axes') and self.axes:\n self.axes._mouseover_set.discard(self)\n self.axes.stale = True\n self.axes = None\n _ax_flag = True\n if (fig := self.get_figure(root=False)) is not None:\n if not _ax_flag:\n fig.stale = True\n self._parent_figure = None\n else:\n raise 
NotImplementedError('cannot remove artist')", + "docstring": "Remove the artist from the figure if possible. The effect will not be visible until the figure is redrawn, e.g., with . Call to update the Axes limits if desired. Note: will not see collections even if the collection was added to the Axes with *autolim* = True. Note: there is no support for removing the artist's legend entry.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:remove arg:self arguments arg If Compare Call Assign Assign If BoolOp Call Call Assign Assign Assign If Compare Call If Assign Assign Raise Call" + }, + { + "library": "pandas", + "name": "value_counts", + "source_code": "def value_counts(self, dropna: bool=True) -> Series:\n pa_type = self._pa_array.type\n if pa_version_under11p0 and pa.types.is_duration(pa_type):\n data = self._pa_array.cast(pa.int64())\n else:\n data = self._pa_array\n from pandas import Index, Series\n vc = data.value_counts()\n values = vc.field(0)\n counts = vc.field(1)\n if dropna and data.null_count > 0:\n mask = values.is_valid()\n values = values.filter(mask)\n counts = counts.filter(mask)\n if pa_version_under11p0 and pa.types.is_duration(pa_type):\n values = values.cast(pa_type)\n counts = ArrowExtensionArray(counts)\n index = Index(type(self)(values))\n return Series(counts, index=index, name='count', copy=False)", + "docstring": "Return a Series containing counts of each unique value. Parameters ---------- dropna : bool, default True Don't include counts of missing values. Returns ------- counts : Series See Also -------- Series.value_counts", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:value_counts arg:self arg:dropna arguments arg arg Assign If BoolOp Call Assign Call Call Assign Assign Call Assign Call Assign Call If BoolOp Compare Assign Call Assign Call Assign Call If BoolOp Call Assign Call Assign Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, fetches, feed_dict, run_options, run_metadata, run_call_count, is_callable_runner=False):\n self.fetches = fetches\n self.feed_dict = feed_dict\n self.run_options = run_options\n self.run_metadata = run_metadata\n self.run_call_count = run_call_count\n self.is_callable_runner = is_callable_runner", + "docstring": "Constructor of . Args: fetches: Fetch targets of the run() call. feed_dict: The feed dictionary to the run() call. run_options: RunOptions input to the run() call. run_metadata: RunMetadata input to the run() call. The above four arguments are identical to the input arguments to the run() method of a non-wrapped TensorFlow session. run_call_count: 1-based count of how many run calls (including this one) has been invoked. 
is_callable_runner: (bool) whether a runner returned by Session.make_callable is being run.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:fetches arg:feed_dict arg:run_options arg:run_metadata arg:run_call_count arg:is_callable_runner arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "transpose_v2", + "source_code": "@tf_export('transpose', v1=[])\n@dispatch.add_dispatch_support\ndef transpose_v2(a, perm=None, conjugate=False, name='transpose'):\n return transpose(a=a, perm=perm, name=name, conjugate=conjugate)", + "docstring": "Transposes , where is a Tensor. Permutes the dimensions according to the value of . The returned tensor's dimension will correspond to the input dimension . If is not given, it is set to (n-1...0), where n is the rank of the input tensor. Hence, by default, this operation performs a regular matrix transpose on 2-D input Tensors. If conjugate is and is either or then the values of are conjugated and transposed. @compatibility(numpy) In transposes are memory-efficient constant time operations as they simply return a new view of the same data with adjusted . TensorFlow does not support strides, so returns a new tensor with the items permuted. @end_compatibility For example: >>> x = tf.constant([[1, 2, 3], [4, 5, 6]]) >>> tf.transpose(x) Equivalently, you could call . If is complex, setting conjugate=True gives the conjugate transpose: >>> x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j], ... [4 + 4j, 5 + 5j, 6 + 6j]]) >>> tf.transpose(x, conjugate=True) 'perm' is more useful for n-dimensional tensors where n > 2: >>> x = tf.constant([[[ 1, 2, 3], ... [ 4, 5, 6]], ... [[ 7, 8, 9], ... [10, 11, 12]]]) As above, simply calling will default to . To take the transpose of the matrices in dimension-0 (such as when you are transposing matrices where 0 is the batch dimension), you would set . >>> tf.transpose(x, perm=[0, 2, 1]) Note: This has a shorthand ): Args: a: A . perm: A permutation of the dimensions of . This should be a vector. conjugate: Optional bool. Setting it to is mathematically equivalent to tf.math.conj(tf.transpose(input)). name: A name for the operation (optional). 
Returns: A transposed .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:transpose_v2 arg:a arg:perm arg:conjugate arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_UnsortedSegmentMinOrMaxGrad", + "source_code": "def _UnsortedSegmentMinOrMaxGrad(op: ops.Operation, grad):\n gathered_outputs, zero_clipped_indices, is_positive = _GatherDropNegatives(op.outputs[0], op.inputs[1])\n is_selected = math_ops.equal(op.inputs[0], gathered_outputs)\n is_selected = math_ops.logical_and(is_selected, is_positive)\n num_selected = math_ops.unsorted_segment_sum(math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])\n weighted_grads = math_ops.divide(grad, num_selected)\n gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None, zero_clipped_indices, is_positive)\n zeros = array_ops.zeros_like(gathered_grads)\n return (array_ops.where_v2(is_selected, gathered_grads, zeros), None, None)", + "docstring": "Gradient for UnsortedSegmentMin and UnsortedSegmentMax.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_UnsortedSegmentMinOrMaxGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "deregister", + "source_code": "def deregister() -> None:\n plot_backend = _get_plot_backend('matplotlib')\n plot_backend.deregister()", + "docstring": "Remove pandas formatters and converters. Removes the custom converters added by :func:. This attempts to set the state of the registry back to the state before pandas registered its own units. Converters for pandas' own types like Timestamp and Period are removed completely. Converters for types pandas overwrites, like ``, are restored to their original value. See Also -------- register_matplotlib_converters : Register pandas formatters and converters with matplotlib. Examples -------- .. plot:: :context: close-figs The following line is done automatically by pandas so the plot can be rendered: >>> pd.plotting.register_matplotlib_converters() >>> df = pd.DataFrame( ... {\"ts\": pd.period_range(\"2020\", periods=2, freq=\"M\"), \"y\": [1, 2]} ... ) >>> plot = df.plot.line(x=\"ts\", y=\"y\") Unsetting the register manually an error will be raised: >>> pd.set_option( ... \"plotting.matplotlib.register_converters\", False ... 
) # doctest: +SKIP >>> df.plot.line(x=\"ts\", y=\"y\") # doctest: +SKIP Traceback (most recent call last): TypeError: float() argument must be a string or a real number, not 'Period'", + "type": "function", + "file_path": "pandas\\pandas\\plotting\\_misc.py", + "ast_data": "FunctionDef name:deregister arguments Assign Call Call" + }, + { + "library": "cryptography", + "name": "algorithm", + "source_code": "@property\n@abc.abstractmethod\ndef algorithm(self) -> asym_utils.Prehashed | hashes.HashAlgorithm:\n pass", + "docstring": "The digest algorithm used with this signature.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py", + "ast_data": "FunctionDef name:algorithm arg:self arguments arg" + }, + { + "library": "seaborn", + "name": "tick", + "source_code": "def tick(self, locator: Locator | None=None) -> Nominal:\n new = copy(self)\n new._tick_params = {'locator': locator}\n return new", + "docstring": "Configure the selection of ticks for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. At the moment, it is probably not very useful. Parameters ---------- locator : :class: subclass Pre-configured matplotlib locator; other parameters will not be used. Returns ------- Copy of self with new tick configuration.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\scales.py", + "ast_data": "FunctionDef name:tick arg:self arg:locator arguments arg arg Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "prepare_global_plan", + "source_code": "@abc.abstractmethod\ndef prepare_global_plan(self, plans: list[SavePlan]) -> list[SavePlan]:\n pass", + "docstring": "Perform centralized planning of storage. This method is only called on the coordinator instance. While this method can produce a completely different plan, the preferred way is to store storage specific data in SavePlan::storage_data. Args: plans: A list of `` after storage global planning", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py", + "ast_data": "FunctionDef name:prepare_global_plan arg:self arg:plans arguments arg arg" + }, + { + "library": "tensorflow", + "name": "pool_v2", + "source_code": "@tf_export('nn.pool', v1=[])\n@dispatch.add_dispatch_support\ndef pool_v2(input, window_shape, pooling_type, strides=None, padding='VALID', data_format=None, dilations=None, name=None):\n return pool(input=input, window_shape=window_shape, pooling_type=pooling_type, padding=padding, dilation_rate=dilations, strides=strides, name=name, data_format=data_format)", + "docstring": "Performs an N-D pooling operation. In the case that does not start with \"NC\", computes for 0 = 1. pooling_type: Specifies pooling operation, must be \"AVG\" or \"MAX\". strides: Optional. Sequence of N ints >= 1. Defaults to . If any value of strides is > 1, then all values of dilation_rate must be 1. padding: The padding algorithm, must be \"SAME\" or \"VALID\". Defaults to \"SAME\". See [here]( for more information. data_format: A string or None. Specifies whether the channel dimension of the and output is the last dimension (default, or if does not start with \"NC\"), or the second dimension (if starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and \"NCW\". For N=2, the valid values are \"NHWC\" (default) and \"NCHW\". For N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\". dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to . 
If any value of dilation_rate is > 1, then all values of strides must be 1. name: Optional. Name of the op. Returns: Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] if data_format is None or does not start with \"NC\", or [batch_size, num_channels] + output_spatial_shape if data_format starts with \"NC\", where depends on the value of padding: If padding = \"SAME\": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding = \"VALID\": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i]) / strides[i]). Raises: ValueError: if arguments are invalid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:pool_v2 arg:input arg:window_shape arg:pooling_type arg:strides arg:padding arg:data_format arg:dilations arg:name arguments arg arg arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pygame", + "name": "get_sprites_at", + "source_code": "def get_sprites_at(self, pos):\n _sprites = self._spritelist\n rect = Rect(pos, (1, 1))\n colliding_idx = rect.collidelistall(_sprites)\n return [_sprites[i] for i in colliding_idx]", + "docstring": "return a list with all sprites at that position LayeredUpdates.get_sprites_at(pos): return colliding_sprites Bottom sprites are listed first; the top ones are listed last.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:get_sprites_at arg:self arg:pos arguments arg arg Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_params", + "source_code": "def get_params(self, deep=True):\n return dict(kernels=self.kernels)", + "docstring": "Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_default_config", + "source_code": "def get_default_config(self, row):\n return None", + "docstring": "Returns the default config for a given sample. The default config could for example be the config that is the chosen by a current handwritten heuristic. This can for example be used in get_unsafe_leaf to compare the predicted config with the default config.", + "type": "method", + "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", + "ast_data": "FunctionDef name:get_default_config arg:self arg:row arguments arg arg Return return:no" + }, + { + "library": "pytorch", + "name": "match", + "source_code": "def match(self, graph: Graph) -> list[InternalMatch]:\n internal_matches = super().match(graph)\n for internal_match in internal_matches:\n for k, n in self.name_node_map.items():\n internal_match.name_node_map[k] = internal_match.nodes_map[n]\n return internal_matches", + "docstring": "The returned InternalMatch will have name_node_map populated with a map from node name (str) to the target node, e.g. {\"conv\": target_conv_ndoe, \"relu\": target_relu_node} this requires the pattern graph returns an additional output of node name to node, e.g. 
instead of: we should do: instead", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\utils\\matcher_with_name_node_map_utils.py", + "ast_data": "FunctionDef name:match arg:self arg:graph arguments arg arg Assign Call Call For For Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "__call__", + "source_code": "def __call__(self, estimate_mode_type: str) -> Self:\n if estimate_mode_type == 'operator-level-benchmark':\n self._estimate = RuntimeEstimator._benchmark_estimate\n elif estimate_mode_type == 'operator-level-cost-model':\n self._estimate = RuntimeEstimator._roofline_estimate\n else:\n raise NotImplementedError(f'estimate_mode_type {estimate_mode_type} not supported')\n self._estimate_mode_type = estimate_mode_type\n return self", + "docstring": "Sets the estimate mode type. Currently supported modes: - \"operator-level-benchmark\": Estimates runtime using operator benchmarking. - \"operator-level-cost-model\": Estimates runtime using roofline cost model. Args: estimate_mode_type (str): The type of estimate mode to use. Returns: RuntimeEstimator: The runtime estimator instance. Raises: NotImplementedError: If the estimate mode type is not supported.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\runtime_estimator.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:estimate_mode_type arguments arg arg If Compare Assign If Compare Assign Raise Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_n_features_out", + "source_code": "@property\ndef _n_features_out(self):\n return self.components_.shape[0]", + "docstring": "Number of transformed output features.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py", + "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes" + }, + { + "library": "sphinx", + "name": "Highlight", + "source_code": "class Highlight(SphinxDirective):\n has_content = False\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec: ClassVar[OptionSpec] = {'force': directives.flag, 'linenothreshold': directives.positive_int}\n\n def run(self) -> list[Node]:\n language = self.arguments[0].strip()\n linenothreshold = self.options.get('linenothreshold', sys.maxsize)\n force = 'force' in self.options\n self.env.current_document.highlight_language = language\n return [addnodes.highlightlang(lang=language, force=force, linenothreshold=linenothreshold)]", + "docstring": "Directive to set the highlighting language for code blocks, as well as the threshold for line numbers.", + "type": "class", + "file_path": "sphinx\\sphinx\\directives\\code.py", + "ast_data": "ClassDef name:Highlight Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Assign Call Assign Compare Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_process_logs", + "source_code": "def _process_logs(self, logs, is_batch_hook=False):\n if logs is None:\n return {}\n if self._supports_tf_logs:\n return logs\n if is_batch_hook and self._batch_hooks_support_tf_logs:\n return logs\n return tf_utils.sync_to_numpy_or_python_type(logs)", + "docstring": "Turns tensors into numpy arrays or Python scalars if necessary.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:_process_logs arg:self arg:logs arg:is_batch_hook arguments arg arg arg If Compare Return return:no If Return 
return:yes If BoolOp Return return:yes Return return:yes Call" + }, + { + "library": "pygame", + "name": "stop", + "source_code": "def stop(self):\n pass", + "docstring": "Not implemented.", + "type": "method", + "file_path": "pygame\\src_py\\_camera_vidcapture.py", + "ast_data": "FunctionDef name:stop arg:self arguments arg" + }, + { + "library": "sphinx", + "name": "GenericObject", + "source_code": "class GenericObject(ObjectDescription[str]):\n indextemplate: str = ''\n parse_node: Callable[[BuildEnvironment, str, desc_signature], str] | None = None\n\n def handle_signature(self, sig: str, signode: desc_signature) -> str:\n if self.parse_node:\n name = self.parse_node(self.env, sig, signode)\n else:\n signode.clear()\n signode += addnodes.desc_name(sig, sig)\n name = ws_re.sub(' ', sig)\n return name\n\n def add_target_and_index(self, name: str, sig: str, signode: desc_signature) -> None:\n node_id = make_id(self.env, self.state.document, self.objtype, name)\n signode['ids'].append(node_id)\n self.state.document.note_explicit_target(signode)\n if self.indextemplate:\n colon = self.indextemplate.find(':')\n if colon != -1:\n indextype = self.indextemplate[:colon].strip()\n indexentry = self.indextemplate[colon + 1:].strip() % (name,)\n else:\n indextype = 'single'\n indexentry = self.indextemplate % (name,)\n self.indexnode['entries'].append((indextype, indexentry, node_id, '', None))\n std = self.env.domains.standard_domain\n std.note_object(self.objtype, name, node_id, location=signode)", + "docstring": "A generic x-ref directive registered with Sphinx.add_object_type().", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py", + "ast_data": "ClassDef name:GenericObject FunctionDef name:handle_signature arg:self arg:sig arg:signode arguments arg arg arg If Assign Call Call Call Assign Call Return return:yes FunctionDef name:add_target_and_index arg:self arg:name arg:sig arg:signode arguments arg arg arg arg Assign Call Call Call If Assign Call If Compare Assign Call Assign Call Assign Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "convert_nested_bidirectional", + "source_code": "def convert_nested_bidirectional(weights):\n num_weights_per_layer = len(weights) // 2\n forward_weights = preprocess_weights_for_loading(layer.forward_layer, weights[:num_weights_per_layer], original_keras_version, original_backend)\n backward_weights = preprocess_weights_for_loading(layer.backward_layer, weights[num_weights_per_layer:], original_keras_version, original_backend)\n return forward_weights + backward_weights", + "docstring": "Converts layers nested in wrapper. This function uses for converting layers. Args: weights: List of weights values (Numpy arrays). Returns: A list of weights values (Numpy arrays).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py", + "ast_data": "FunctionDef name:convert_nested_bidirectional arg:weights arguments arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "tocoo", + "source_code": "def tocoo(self, copy=False):\n return self.tocsr(copy=False).tocoo(copy=copy)", + "docstring": "Convert this array/matrix to COOrdinate format. 
With copy=False, the data/indices may be shared between this array/matrix and the resultant coo_array/matrix.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_base.py", + "ast_data": "FunctionDef name:tocoo arg:self arg:copy arguments arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_set_oob_score_and_attributes", + "source_code": "def _set_oob_score_and_attributes(self, X, y, scoring_function=None):\n self.oob_decision_function_ = super()._compute_oob_predictions(X, y)\n if self.oob_decision_function_.shape[-1] == 1:\n self.oob_decision_function_ = self.oob_decision_function_.squeeze(axis=-1)\n if scoring_function is None:\n scoring_function = accuracy_score\n self.oob_score_ = scoring_function(y, np.argmax(self.oob_decision_function_, axis=1))", + "docstring": "Compute and set the OOB score and attributes. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. y : ndarray of shape (n_samples, n_outputs) The target matrix. scoring_function : callable, default=None Scoring function for OOB score. Defaults to .", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py", + "ast_data": "FunctionDef name:_set_oob_score_and_attributes arg:self arg:X arg:y arg:scoring_function arguments arg arg arg arg Assign Call Call If Compare Assign Call If Compare Assign Assign Call Call" + }, + { + "library": "authlib", + "name": "validate_request_parameter_supported", + "source_code": "def validate_request_parameter_supported(self):\n _validate_boolean_value(self, 'request_parameter_supported')", + "docstring": "OPTIONAL. Boolean value specifying whether the OP supports use of the request parameter, with true indicating support. If omitted, the default value is false.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", + "ast_data": "FunctionDef name:validate_request_parameter_supported arg:self arguments arg Call" + }, + { + "library": "matplotlib", + "name": "set_alpha", + "source_code": "def set_alpha(self, alpha):\n self.alpha = None if isinstance(alpha, np.ndarray) else alpha", + "docstring": "Set the transparency between 0 (transparent) and 1 (opaque). If an array is provided, *alpha* will be set to None to use the transparency values associated with the colormap.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", + "ast_data": "FunctionDef name:set_alpha arg:self arg:alpha arguments arg arg Assign Call" + }, + { + "library": "pytorch", + "name": "celu", + "source_code": "def celu(input: Tensor, alpha: float=1.0, inplace: bool=False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(celu, (input,), input, alpha=alpha, inplace=inplace)\n if inplace:\n result = torch.celu_(input, alpha)\n else:\n result = torch.celu(input, alpha)\n return result", + "docstring": "celu(input, alpha=1., inplace=False) -> Tensor Applies element-wise, :math:. 
See :class: for more details.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:celu arg:input arg:alpha arg:inplace arguments arg arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "dense", + "source_code": "def dense(inputs, units, activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None):\n warnings.warn('`tf.layers.dense` is deprecated and will be removed in a future version. Please use `tf.keras.layers.Dense` instead.')\n layer = Dense(units, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _scope=name, _reuse=reuse)\n return layer.apply(inputs)", + "docstring": "Functional interface for the densely-connected layer. This layer implements the operation: where is the activation function passed as the argument (if not ), is a weights matrix created by the layer, and is a bias vector created by the layer (only if is ). Args: inputs: Tensor input. units: Integer or Long, dimensionality of the output space. activation: Activation function (callable). Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: Initializer function for the weight matrix. If (default), weights are initialized using the default initializer used by . bias_initializer: Initializer function for the bias. kernel_regularizer: Regularizer function for the weight matrix. bias_regularizer: Regularizer function for the bias. activity_regularizer: Regularizer function for the output. kernel_constraint: An optional projection function to be applied to the kernel after being updated by an (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: An optional projection function to be applied to the bias after being updated by an . trainable: Boolean, if also add variables to the graph collection (see ). name: String, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor the same shape as except the last dimension is of size . 
Raises: ValueError: if eager execution is enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\core.py", + "ast_data": "FunctionDef name:dense arg:inputs arg:units arg:activation arg:use_bias arg:kernel_initializer arg:bias_initializer arg:kernel_regularizer arg:bias_regularizer arg:activity_regularizer arg:kernel_constraint arg:bias_constraint arg:trainable arg:name arg:reuse arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "IsMerge", + "source_code": "def IsMerge(op):\n return op.type == 'Merge' or op.type == 'RefMerge'", + "docstring": "Return true if is a Merge.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py", + "ast_data": "FunctionDef name:IsMerge arg:op arguments arg Return return:yes BoolOp Compare Compare" + }, + { + "library": "tensorflow", + "name": "scatter", + "source_code": "@tf_should_use.should_use_result\ndef scatter(self, indices, value, name=None):\n with ops.name_scope(name, 'TensorArrayScatter', [self._handle, value, indices]):\n value = ops.convert_to_tensor(value, preferred_dtype=self._dtype, name='value')\n _check_dtypes(value, self._dtype)\n if not context.executing_eagerly():\n self._check_element_shape(value.shape[1:])\n with self._maybe_colocate_with(value):\n flow_out = gen_data_flow_ops.tensor_array_scatter_v3(handle=self._handle, indices=indices, value=value, flow_in=self._flow, name=name)\n return build_ta_with_new_flow(self, flow_out)", + "docstring": "See TensorArray.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:scatter arg:self arg:indices arg:value arg:name arguments arg arg arg arg With Call Assign Call Call If Call Call With Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "update", + "source_code": "def update(self, j, est):\n do_oob = est.subsample < 1\n i = j - self.begin_at_stage\n if (i + 1) % self.verbose_mod == 0:\n oob_impr = est.oob_improvement_[j] if do_oob else 0\n remaining_time = (est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)\n if remaining_time > 60:\n remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)\n else:\n remaining_time = '{0:.2f}s'.format(remaining_time)\n print(self.verbose_fmt.format(iter=j + 1, train_score=est.train_score_[j], oob_impr=oob_impr, remaining_time=remaining_time))\n if self.verbose == 1 and (i + 1) // (self.verbose_mod * 10) > 0:\n self.verbose_mod *= 10", + "docstring": "Update reporter with new iteration. Parameters ---------- j : int The new iteration. est : Estimator The estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:update arg:self arg:j arg:est arguments arg arg arg Assign Compare Assign If Compare Assign Assign Call Call If Compare Assign Call Assign Call Call Call If BoolOp Compare Compare" + }, + { + "library": "tensorflow", + "name": "batch_gather", + "source_code": "@dispatch.dispatch_for_api(array_ops.batch_gather)\ndef batch_gather(params: ragged_tensor.RaggedOrDense, indices: ragged_tensor.RaggedOrDense, name=None):\n return ragged_gather_ops.gather(params, indices, batch_dims=-1, name=name)", + "docstring": "Gathers slices from according to with batch dims. 
This operation is similar to , but it assumes that the leading dimensions of and are batch dimensions, and performs a gather within each batch. In particular, when using this operation with batch dimensions : * has shape * has shape . * has shape . * Args: params: A potentially ragged tensor with shape (, ). indices: A potentially ragged tensor with shape (). name: A name for the operation (optional). Returns: A potentially ragged tensor with shape . . #### Example: >>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']]) >>> indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]]) >>> tf.compat.v1.batch_gather(params, indices)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_batch_gather_ops.py", + "ast_data": "FunctionDef name:batch_gather arg:params arg:indices arg:name arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "get_qconfig_info", + "source_code": "def get_qconfig_info(self, model) -> dict[str, DetectorQConfigInfo]:\n dynamic_static_info = self._generate_dict_info(model)\n module_fqn_to_detector_qconfig_info = {}\n for module_fqn in dynamic_static_info:\n detector_qconfig_info = DetectorQConfigInfo(module_fqn)\n dynamic_static_recommended: bool = dynamic_static_info[module_fqn][self.DEFAULT_DYNAMIC_REC_KEY]\n detector_qconfig_info.is_activation_dynamic = dynamic_static_recommended\n module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info\n return module_fqn_to_detector_qconfig_info", + "docstring": "Returns the DetectorQConfigInfo for each module_fqn relevant Args model (nn.Module or subclass): model to find observer insertion points Returns a Dict mapping from unique observer fqns (where we want to insert them) to: A DetectorQConfigInfo with the information to generate a QConfig for a specific module", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:get_qconfig_info arg:self arg:model arguments arg arg Assign Call Assign For Assign Call Assign Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_dual_gap", + "source_code": "def _dual_gap(emp_cov, precision_, alpha):\n gap = np.sum(emp_cov * precision_)\n gap -= precision_.shape[0]\n gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())\n return gap", + "docstring": "Expression of the dual gap convergence criterion The specific definition is given in Duchi \"Projected Subgradient Methods for Learning Sparse Gaussians\".", + "type": "function", + "file_path": "scikit-learn\\sklearn\\covariance\\_graph_lasso.py", + "ast_data": "FunctionDef name:_dual_gap arg:emp_cov arg:precision_ arg:alpha arguments arg arg arg Assign Call Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "WhileCondFuncGraph", + "source_code": "class WhileCondFuncGraph(ControlFlowFuncGraph):\n pass", + "docstring": "FuncGraph for the condition of tf.while_loop(). 
This is used to distinguish while conditions from other functions.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_v2_func_graphs.py", + "ast_data": "ClassDef name:WhileCondFuncGraph" + }, + { + "library": "tensorflow", + "name": "_get_distribution", + "source_code": "def _get_distribution(old_value):\n dist = pasta.parse('\"uniform\" if old_value else \"truncated_normal\"')\n ifexpr = dist.body[0].value\n pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)\n pasta.base.formatting.set(dist, 'prefix', '(')\n pasta.base.formatting.set(dist, 'suffix', ')')\n return dist", + "docstring": "Returns an AST matching the following: (\"uniform\" if (old_value) else \"truncated_normal\")", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py", + "ast_data": "FunctionDef name:_get_distribution arg:old_value arguments arg Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "state", + "source_code": "@state.setter\ndef state(self, value):\n self._state = value\n event = self._get_state_event(value)\n win32event.PulseEvent(event)", + "docstring": "Set the bus state.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\win32.py", + "ast_data": "FunctionDef name:state arg:self arg:value arguments arg arg Assign Assign Call Call" + }, + { + "library": "tensorflow", + "name": "number_of_tuple_elements", + "source_code": "@property\ndef number_of_tuple_elements(self):\n return len(self._sharding_policies)", + "docstring": "Returns the number of InfeedQueue tuple elements.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", + "ast_data": "FunctionDef name:number_of_tuple_elements arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars):\n self.conv_result = conv_result\n self.input_var = input_var\n self.c_out = c_out\n self.kernel = kernel\n self.padding = padding\n self.stride = stride\n self.dilation = dilation\n self.matching_constraint = matching_constraint_vars", + "docstring": ":param conv_result: the convolution result :param input_var: input to convolution :param c_out: output chanel type :param kernel: kernel tuple", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:conv_result arg:input_var arg:c_out arg:kernel arg:padding arg:stride arg:dilation arg:matching_constraint_vars arguments arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "scipy", + "name": "getrow", + "source_code": "def getrow(self, i):\n return self._getrow(i)", + "docstring": "Returns a copy of row i of the matrix, as a (1 x n) sparse matrix (row vector).", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_matrix.py", + "ast_data": "FunctionDef name:getrow arg:self arg:i arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "__getitem__", + "source_code": "def __getitem__(self, index):\n if isinstance(index, str):\n i = self.index(index)\n elif 0 <= index < self.num_fields:\n i = index\n else:\n raise IndexError('Index out of range when accessing field in a feature: %s.' 
% index)\n return Field(self, i)", + "docstring": "Get the Field object at the specified index, which may be either an integer or the Field's string label. Note that the Field object is not the field's _value_ -- use the method instead to retrieve the value (e.g. an integer) instead of a Field instance.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Call Assign Call If Compare Assign Raise Call Return return:yes Call" + }, + { + "library": "cryptography", + "name": "public_bytes", + "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n pass", + "docstring": "The serialized bytes of the public key.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py", + "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "_get_handle_deleter", + "source_code": "def _get_handle_deleter(graph, deleter_key, handle):\n result = graph._handle_deleters.get(deleter_key)\n if result is None:\n handle_device = TensorHandle._get_device_name(handle)\n with graph.as_default(), graph.device(handle_device):\n holder = array_ops.placeholder(dtypes.string)\n deleter = gen_data_flow_ops.delete_session_tensor(holder)\n result = (holder, deleter)\n graph._handle_deleters[deleter_key] = result\n return result", + "docstring": "Return a deletion subgraph for this handle.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py", + "ast_data": "FunctionDef name:_get_handle_deleter arg:graph arg:deleter_key arg:handle arguments arg arg arg Assign Call If Compare Assign Call With Call Call Assign Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "x1", + "source_code": "@property\ndef x1(self):\n return self.get_points()[1, 0]", + "docstring": "The second of the pair of *x* coordinates that define the bounding box. This is not guaranteed to be greater than :attr: (for that, use :attr:).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:x1 arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "_get_non_gfk_field", + "source_code": "def _get_non_gfk_field(opts, name):\n field = opts.get_field(name)\n if field.is_relation and (field.many_to_one and (not field.related_model) or field.one_to_many):\n raise FieldDoesNotExist()\n if field.is_relation and (not field.many_to_many) and hasattr(field, 'attname') and (field.attname == name):\n raise FieldIsAForeignKeyColumnName()\n return field", + "docstring": "For historical reasons, the admin app relies on GenericForeignKeys as being \"not found\" by get_field(). This could likely be cleaned up. 
Reverse relations should also be excluded as these aren't attributes of the model (rather something like ).", + "type": "function", + "file_path": "django\\django\\contrib\\admin\\utils.py", + "ast_data": "FunctionDef name:_get_non_gfk_field arg:opts arg:name arguments arg arg Assign Call If BoolOp BoolOp BoolOp Raise Call If BoolOp Call Compare Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "contains_point", + "source_code": "def contains_point(self, point, transform=None, radius=0.0):\n if transform is not None:\n transform = transform.frozen()\n if transform and (not transform.is_affine):\n self = transform.transform_path(self)\n transform = None\n return _path.point_in_path(point[0], point[1], radius, self, transform)", + "docstring": "Return whether the area enclosed by the path contains the given point. The path is always treated as closed; i.e. if the last code is not an implicit segment connecting the last vertex to the first vertex is assumed. Parameters ---------- point : (float, float) The point (x, y) to check. transform : , optional If not `` transformed by *transform*; i.e. for a correct check, *transform* should transform the path into the coordinate system of *point*. radius : float, default: 0 Additional margin on the path in coordinates of *point*. The path is extended tangentially by *radius/2*; i.e. if you would draw the path with a linewidth of *radius*, all points on the line would still be considered to be contained in the area. Conversely, negative values shrink the area: Points on the imaginary line will be considered outside the area. Returns ------- bool Notes ----- The current algorithm has some limitations: - The result is undefined for points exactly at the boundary (i.e. at the path shifted by *radius/2*). - The result is undefined if there is no enclosed area, i.e. all vertices are on a straight line. - If bounding lines start to cross each other due to *radius* shift, the result is not guaranteed to be correct.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\path.py", + "ast_data": "FunctionDef name:contains_point arg:self arg:point arg:transform arg:radius arguments arg arg arg arg If Compare Assign Call If BoolOp Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None):\n super(InverseTimeDecay, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.decay_rate = decay_rate\n self.staircase = staircase\n self.name = name", + "docstring": "Applies inverse time decay to the initial learning rate. Args: initial_learning_rate: A scalar or or a Python number. The initial learning rate. decay_steps: How often to apply decay. decay_rate: A Python number. The decay rate. staircase: Whether to apply decay in a discrete staircase, as opposed to continuous, fashion. name: String. Optional name of the operation. 
Defaults to 'InverseTimeDecay'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:decay_steps arg:decay_rate arg:staircase arg:name arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "getsourcefile", + "source_code": "def getsourcefile(object):\n return _inspect.getsourcefile(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.getsourcefile.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:getsourcefile arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "AutogradEngineVariable", + "source_code": "class AutogradEngineVariable(UserDefinedObjectVariable):\n\n def __init__(self, value, value_type=None, **kwargs) -> None:\n super().__init__(value=value, value_type=value_type, **kwargs)\n\n def call_method(self, tx: 'InstructionTranslator', name, args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n if name == 'queue_callback':\n if torch._dynamo.compiled_autograd.in_compiled_autograd_region:\n assert tx.one_graph, 'queue_callback() is only supported when Compiled Autograd is enabled with fullgraph=True'\n return variables.UserFunctionVariable(torch._dynamo.external_utils.FakeCompiledAutogradEngine.queue_callback, source=self.source).call_function(tx, (tx.output.side_effects.get_ca_final_callbacks_var(), *args), kwargs)\n else:\n unimplemented_v2(gb_type='Unsupported torch._C._ImperativeEngine.queue_callback()', context=f'call_method {self} {name}', explanation='queue_callback() is only supported when Compiled Autograd is enabled with fullgraph=True.', hints=[])\n else:\n unimplemented_v2(gb_type='Unsupported torch._C._ImperativeEngine method', context=f'call_method {self} {name}', explanation=f'Dynamo only supports the `queue_callback` method on a torch._C._ImperativeEngine instance, but found: `{name}`.', hints=[])", + "docstring": "Represents a torch._C._ImperativeEngine instance.", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py", + "ast_data": "ClassDef name:AutogradEngineVariable FunctionDef name:__init__ arg:self arg:value arg:value_type arguments arg arg arg arg Call Call FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg If Compare If Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_maybe_store_sparse", + "source_code": "def _maybe_store_sparse(t, map_op_name, keep_input):\n return utils.smart_cond(keep_input, lambda: _store_sparse(t, shared_name=map_op_name), lambda: constant_op.constant(-1, dtypes.int64))", + "docstring": "Conditionally store a single sparse Tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:_maybe_store_sparse arg:t arg:map_op_name arg:keep_input arguments arg arg arg Return return:yes Call arguments Call arguments Call" + }, + { + "library": "tensorflow", + "name": "infer_steps_for_dataset", + "source_code": "def infer_steps_for_dataset(model, dataset, steps, epochs=1, steps_name='steps'):\n assert isinstance(dataset, data_types.DatasetV2)\n if model._in_multi_worker_mode() and dataset.options().experimental_distribute.auto_shard_policy != 
options_lib.AutoShardPolicy.OFF:\n return None\n size = backend.get_value(cardinality.cardinality(dataset))\n if size == cardinality.INFINITE and steps is None:\n raise ValueError('When passing an infinitely repeating dataset, you must specify the `%s` argument.' % (steps_name,))\n if size >= 0:\n if steps is not None and steps * epochs > size:\n if epochs > 1:\n raise ValueError('The dataset you passed contains %s batches, but you passed `epochs=%s` and `%s=%s`, which is a total of %s steps. We cannot draw that many steps from this dataset. We suggest to set `%s=%s`.' % (size, epochs, steps_name, steps, steps * epochs, steps_name, size // epochs))\n else:\n raise ValueError('The dataset you passed contains %s batches, but you passed `%s=%s`. We cannot draw that many steps from this dataset. We suggest to set `%s=%s`.' % (size, steps_name, steps, steps_name, size))\n if steps is None:\n if size >= 0:\n return size\n return None\n return steps", + "docstring": "Infers steps_per_epoch needed to loop through a dataset. Args: model: Keras model instance. dataset: Input data of type tf.data.Dataset. steps: Number of steps to draw from the dataset (may be None if unknown). epochs: Number of times to iterate over the dataset. steps_name: The string name of the steps argument, either , , or . Only used for error message formatting. Returns: Integer or . Inferred number of steps to loop through the dataset. is returned if 1) the size of the dataset is unknown and was not specified, or 2) this is multi-worker training and auto sharding is enabled. Raises: ValueError: In case of invalid argument values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:infer_steps_for_dataset arg:model arg:dataset arg:steps arg:epochs arg:steps_name arguments arg arg arg arg arg Call If BoolOp Call Compare Call Return return:no Assign Call Call If BoolOp Compare Compare Raise Call If Compare If BoolOp Compare Compare If Compare Raise Call Raise Call If Compare If Compare Return return:yes Return return:no Return return:yes" + }, + { + "library": "tensorflow", + "name": "_broadcast_normalize_batch_in_training", + "source_code": "def _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n mean, var = nn.moments(x, reduction_axes, None, None, False)\n target_shape = []\n for axis in range(ndim(x)):\n if axis in reduction_axes:\n target_shape.append(1)\n else:\n target_shape.append(array_ops.shape(x)[axis])\n target_shape = array_ops_stack.stack(target_shape)\n broadcast_mean = array_ops.reshape(mean, target_shape)\n broadcast_var = array_ops.reshape(var, target_shape)\n if gamma is None:\n broadcast_gamma = None\n else:\n broadcast_gamma = array_ops.reshape(gamma, target_shape)\n if beta is None:\n broadcast_beta = None\n else:\n broadcast_beta = array_ops.reshape(beta, target_shape)\n normed = nn.batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon)\n return (normed, mean, var)", + "docstring": "Non-fused, broadcast version of . Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. 
Returns: A tuple length of 3, .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_broadcast_normalize_batch_in_training arg:x arg:gamma arg:beta arg:reduction_axes arg:epsilon arguments arg arg arg arg arg Assign Call Assign For Call Call If Compare Call Call Call Assign Call Assign Call Assign Call If Compare Assign Assign Call If Compare Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "pygame", + "name": "use_arraytype", + "source_code": "def use_arraytype(arraytype):\n warnings.warn(DeprecationWarning('only numpy arrays are now supported, this function will be removed in a future version of the module'))\n arraytype = arraytype.lower()\n if arraytype != 'numpy':\n raise ValueError('invalid array type')", + "docstring": "pygame.sndarray.use_arraytype(arraytype): return None DEPRECATED - only numpy arrays are now supported.", + "type": "function", + "file_path": "pygame\\src_py\\sndarray.py", + "ast_data": "FunctionDef name:use_arraytype arg:arraytype arguments arg Call Call Assign Call If Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "load", + "source_code": "@tf_export('data.experimental.load', v1=[])\n@deprecation.deprecated(None, 'Use `tf.data.Dataset.load(...)` instead.')\ndef load(path, element_spec=None, compression=None, reader_func=None):\n return dataset_ops.Dataset.load(path, element_spec, compression, reader_func)", + "docstring": "Loads a previously saved dataset. Example usage: >>> import tempfile >>> path = os.path.join(tempfile.gettempdir(), \"saved_data\") >>> # Save a dataset >>> dataset = tf.data.Dataset.range(2) >>> tf.data.experimental.save(dataset, path) >>> new_dataset = tf.data.experimental.load(path) >>> for elem in new_dataset: ... print(elem) tf.Tensor(0, shape=(), dtype=int64) tf.Tensor(1, shape=(), dtype=int64) If the default option of sharding the saved dataset was used, the element order of the saved dataset will be preserved when loading it. The argument can be used to specify a custom order in which elements should be loaded from the individual shards. The is expected to take a single argument -- a dataset of datasets, each containing elements of one of the shards -- and return a dataset of elements. For example, the order of shards can be shuffled when loading them as follows: Args: path: Required. A path pointing to a previously saved dataset. element_spec: Optional. A nested structure of objects matching the structure of an element of the saved dataset and specifying the type of individual element components. If not provided, the nested structure of saved with the saved dataset is used. Note that this argument is required in graph mode. compression: Optional. The algorithm to use to decompress the data when reading it. Supported options are and . Defaults to . reader_func: Optional. A function to control how to read data from shards. If present, the function will be traced and executed as graph computation. Returns: A instance. Raises: FileNotFoundError: If is not specified and the saved nested structure of can not be located with the saved dataset. 
ValueError: If is not specified and the method is executed in graph mode.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\io.py", + "ast_data": "FunctionDef name:load arg:path arg:element_spec arg:compression arg:reader_func arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "asfreq", + "source_code": "@doc(**_shared_doc_kwargs, other='PeriodIndex', other_name='PeriodIndex')\ndef asfreq(self, freq=None, how: str='E') -> Self:\n how = libperiod.validate_end_alias(how)\n if isinstance(freq, BaseOffset) and hasattr(freq, '_period_dtype_code'):\n freq = PeriodDtype(freq)._freqstr\n freq = Period._maybe_convert_freq(freq)\n base1 = self._dtype._dtype_code\n base2 = freq._period_dtype_code\n asi8 = self.asi8\n end = how == 'E'\n if end:\n ordinal = asi8 + self.dtype._n - 1\n else:\n ordinal = asi8\n new_data = period_asfreq_arr(ordinal, base1, base2, end)\n if self._hasna:\n new_data[self._isnan] = iNaT\n dtype = PeriodDtype(freq)\n return type(self)(new_data, dtype=dtype)", + "docstring": "Convert the {klass} to the specified frequency . Equivalent to applying :meth: with the given arguments to each :class: in this {klass}. Parameters ---------- freq : str A frequency. how : str {{'E', 'S'}}, default 'E' Whether the elements should be aligned to the end or start within pa period. * 'E', 'END', or 'FINISH' for end, * 'S', 'START', or 'BEGIN' for start. January 31st ('END') vs. January 1st ('START') for example. Returns ------- {klass} The transformed {klass} with the new frequency. See Also -------- {other}.asfreq: Convert each Period in a {other_name} to the given frequency. Period.asfreq : Convert a :class: object to the given frequency. Examples -------- >>> pidx = pd.period_range(\"2010-01-01\", \"2015-01-01\", freq=\"Y\") >>> pidx PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'], dtype='period[Y-DEC]') >>> pidx.asfreq(\"M\") PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12', '2015-12'], dtype='period[M]') >>> pidx.asfreq(\"M\", how=\"S\") PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01', '2015-01'], dtype='period[M]')", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\period.py", + "ast_data": "FunctionDef name:asfreq arg:self arg:freq arg:how arguments arg arg arg Assign Call If BoolOp Call Call Assign Call Assign Call Assign Assign Assign Assign Compare If Assign Assign Assign Call If Assign Assign Call Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "predict_log_proba", + "source_code": "def predict_log_proba(self, X, **params):\n _raise_for_params(params, self, 'predict_log_proba')\n check_is_fitted(self)\n if hasattr(self.estimator_, 'predict_log_proba'):\n X = validate_data(self, X, accept_sparse=['csr', 'csc'], dtype=None, ensure_all_finite=False, reset=False)\n if _routing_enabled():\n routed_params = process_routing(self, 'predict_log_proba', **params)\n else:\n routed_params = Bunch()\n routed_params.estimator = Bunch(predict_log_proba=Bunch())\n n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)\n all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)((delayed(_parallel_predict_log_proba)(self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X, self.n_classes_, params=routed_params.estimator.predict_log_proba) for i in range(n_jobs)))\n log_proba = all_log_proba[0]\n for j in range(1, len(all_log_proba)):\n 
log_proba = np.logaddexp(log_proba, all_log_proba[j])\n log_proba -= np.log(self.n_estimators)\n else:\n log_proba = np.log(self.predict_proba(X, **params))\n return log_proba", + "docstring": "Predict class log-probabilities for X. The predicted class log-probabilities of an input sample is computed as the log of the mean predicted class probabilities of the base estimators in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. **params : dict Parameters routed to the , the or the method of the sub-estimators via the metadata routing API. The routing is tried in the mentioned order depending on whether this method is available on the sub-estimator. .. versionadded:: 1.7 Only available if is set. See :ref: for more details. Returns ------- p : ndarray of shape (n_samples, n_classes) The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py", + "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg arg Call Call If Call Assign Call If Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Call Call Call Assign For Call Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, shape=None, dtype=dtypes.float32, ragged_rank=None, row_splits_dtype=dtypes.int64, flat_values_spec=None):\n self._shape = tensor_shape.as_shape(shape)\n self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n if flat_values_spec is not None:\n if dtype is None:\n dtype = flat_values_spec.dtype\n elif dtype != flat_values_spec.dtype:\n raise ValueError('dtype must be the same as flat_values_spec.dtype')\n elif dtype is None:\n raise ValueError('At least one of dtype or flat_values_spec must be provided')\n self._dtype = dtypes.as_dtype(dtype)\n self._flat_values_spec = flat_values_spec\n rank = self._shape.ndims\n if ragged_rank is None:\n if rank is None:\n raise ValueError('Must specify ragged_rank or a shape with a known rank.')\n ragged_rank = rank - 1\n self._ragged_rank = ragged_rank\n if not isinstance(self._ragged_rank, int):\n raise TypeError(f'Argument `ragged_rank` must be an int. Received {ragged_rank}.')\n if rank is not None:\n if ragged_rank >= rank:\n raise ValueError(f'Argument `ragged_rank` ({ragged_rank}) must be less than rank ({rank}).')", + "docstring": "Constructs a type specification for a . Args: shape: The shape of the RaggedTensor, or to allow any shape. If a shape is specified, then all ragged dimensions must have size . dtype: of values in the RaggedTensor. ragged_rank: Python integer, the number of times the RaggedTensor's flat_values is partitioned. Defaults to . row_splits_dtype: for the RaggedTensor's tensor. One of or . flat_values_spec: TypeSpec for flat_value of the RaggedTensor. It shall be provided when the flat_values is a CompositeTensor rather then Tensor. If both and and are provided, must be the same as . 
(experimental)", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:shape arg:dtype arg:ragged_rank arg:row_splits_dtype arg:flat_values_spec arguments arg arg arg arg arg arg Assign Call Assign Call If Compare If Compare Assign If Compare Raise Call If Compare Raise Call Assign Call Assign Assign If Compare If Compare Raise Call Assign Assign If Call Raise Call If Compare If Compare Raise Call" + }, + { + "library": "django", + "name": "class_or_instance_method", + "source_code": "class class_or_instance_method:\n\n def __init__(self, class_method, instance_method):\n self.class_method = class_method\n self.instance_method = instance_method\n\n def __get__(self, instance, owner):\n if instance is None:\n return functools.partial(self.class_method, owner)\n return functools.partial(self.instance_method, instance)", + "docstring": "Hook used in RegisterLookupMixin to return partial functions depending on the caller type (instance or class of models.Field).", + "type": "class", + "file_path": "django\\django\\db\\models\\query_utils.py", + "ast_data": "ClassDef name:class_or_instance_method FunctionDef name:__init__ arg:self arg:class_method arg:instance_method arguments arg arg arg Assign Assign FunctionDef name:__get__ arg:self arg:instance arg:owner arguments arg arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_set_ddp_sink_clone", + "source_code": "def _set_ddp_sink_clone(self, val: bool):\n self._ddp_sink_clone = val", + "docstring": "Sets whether or not DDPSink should clone the output tensors or not. The default is True since if the loss is modified in place we run into the view is modified in-place error. Although, cloning the tensors can add significant memory and performance hit if the number and size of tensors are large. As a result, this can be set to False if you are not modifying the loss in place.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py", + "ast_data": "FunctionDef name:_set_ddp_sink_clone arg:self arg:val arguments arg arg Assign" + }, + { + "library": "scipy", + "name": "_condition_3_13", + "source_code": "def _condition_3_13(A_1_norm, n0, m_max, ell):\n p_max = _compute_p_max(m_max)\n a = 2 * ell * p_max * (p_max + 3)\n b = _theta[m_max] / float(n0 * m_max)\n return A_1_norm <= a * b", + "docstring": "A helper function for the _expm_multiply_* functions. Parameters ---------- A_1_norm : float The precomputed 1-norm of A. n0 : int Number of columns in the _expm_multiply_* B matrix. m_max : int A value related to a bound. ell : int The number of columns used in the 1-norm approximation. This is usually taken to be small, maybe between 1 and 5. Returns ------- value : bool Indicates whether or not the condition has been met. 
Notes ----- This is condition (3.13) in Al-Mohy and Higham (2011).", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py", + "ast_data": "FunctionDef name:_condition_3_13 arg:A_1_norm arg:n0 arg:m_max arg:ell arguments arg arg arg arg Assign Call Assign Assign Call Return return:yes Compare" + }, + { + "library": "scikit-learn", + "name": "_kl_divergence_bh", + "source_code": "def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components, angle=0.5, skip_num_points=0, verbose=False, compute_error=True, num_threads=1):\n params = params.astype(np.float32, copy=False)\n X_embedded = params.reshape(n_samples, n_components)\n val_P = P.data.astype(np.float32, copy=False)\n neighbors = P.indices.astype(np.int64, copy=False)\n indptr = P.indptr.astype(np.int64, copy=False)\n grad = np.zeros(X_embedded.shape, dtype=np.float32)\n error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr, grad, angle, n_components, verbose, dof=degrees_of_freedom, compute_error=compute_error, num_threads=num_threads)\n c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom\n grad = grad.ravel()\n grad *= c\n return (error, grad)", + "docstring": "t-SNE objective function: KL divergence of p_ijs and q_ijs. Uses Barnes-Hut tree methods to calculate the gradient that runs in O(NlogN) instead of O(N^2). Parameters ---------- params : ndarray of shape (n_params,) Unraveled embedding. P : sparse matrix of shape (n_samples, n_sample) Sparse approximate joint probability matrix, computed only for the k nearest-neighbors and symmetrized. Matrix should be of CSR format. degrees_of_freedom : int Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. angle : float, default=0.5 This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. skip_num_points : int, default=0 This does not compute the gradient for points with indices below . This is useful when computing transforms of new data where you'd like to keep the old data fixed. verbose : int, default=False Verbosity level. compute_error: bool, default=True If False, the kl_divergence is not computed and returns NaN. num_threads : int, default=1 Number of threads used to compute the gradient. This is set here to avoid calling _openmp_effective_n_threads for each gradient step. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. 
grad : ndarray of shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\manifold\\_t_sne.py", + "ast_data": "FunctionDef name:_kl_divergence_bh arg:params arg:P arg:degrees_of_freedom arg:n_samples arg:n_components arg:angle arg:skip_num_points arg:verbose arg:compute_error arg:num_threads arguments arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_patchB", + "source_code": "def set_patchB(self, patchB):\n self.patchB = patchB\n self.stale = True", + "docstring": "Set the head patch. Parameters ---------- patchB :", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_patchB arg:self arg:patchB arguments arg arg Assign Assign" + }, + { + "library": "scipy", + "name": "rfft", + "source_code": "def rfft(x, n=None, axis=-1, overwrite_x=False):\n return _pocketfft.rfft_fftpack(x, n, axis, None, overwrite_x)", + "docstring": "Discrete Fourier transform of a real sequence. Parameters ---------- x : array_like, real-valued The data to transform. n : int, optional Defines the length of the Fourier transform. If is not specified (the default) then `xxscipy.fft.rfft`. Examples -------- >>> from scipy.fftpack import fft, rfft >>> a = [9, -9, 1, 3] >>> fft(a) array([ 4. +0.j, 8.+12.j, 16. +0.j, 8.-12.j]) >>> rfft(a) array([ 4., 8., 12., 16.])", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_basic.py", + "ast_data": "FunctionDef name:rfft arg:x arg:n arg:axis arg:overwrite_x arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "fit_logx", + "source_code": "def fit_logx(self, grid):\n X, y = (np.c_[np.ones(len(self.x)), self.x], self.y)\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return (yhat, None)\n beta_boots = algo.bootstrap(X, y, func=reg_func, n_boot=self.n_boot, units=self.units, seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return (yhat, yhat_boots)", + "docstring": "Fit the model in log-space.", + "type": "method", + "file_path": "seaborn\\seaborn\\regression.py", + "ast_data": "FunctionDef name:fit_logx arg:self arg:grid arguments arg arg Assign Call Call Assign Call Call Call FunctionDef name:reg_func arg:_x arg:_y arguments arg arg Assign Call Return return:yes Call Call Assign Call Call If Compare Return return:yes Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "polymulx", + "source_code": "def polymulx(c):\n [c] = pu.as_series([c])\n if len(c) == 1 and c[0] == 0:\n return c\n prd = np.empty(len(c) + 1, dtype=c.dtype)\n prd[0] = c[0] * 0\n prd[1:] = c\n return prd", + "docstring": "Multiply a polynomial by x. Multiply the polynomial by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of polynomial coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. 
See Also -------- polyadd, polysub, polymul, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = (1, 2, 3) >>> P.polymulx(c) array([0., 1., 2., 3.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polynomial.py", + "ast_data": "FunctionDef name:polymulx arg:c arguments arg Assign Call If BoolOp Compare Call Compare Return return:yes Assign Call Call Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_dict", + "source_code": "def _get_dict(self, ignored_keys: Optional[list[str]]=None, ignored_prefixes: Optional[list[str]]=None, skip_default: bool=False) -> dict[str, Any]:\n config: dict[str, Any] = {}\n for key in self._config:\n if ignored_keys and key in ignored_keys:\n continue\n if ignored_prefixes:\n if any((key.startswith(prefix) for prefix in ignored_prefixes)):\n continue\n if skip_default and self._is_default(key):\n continue\n if self._config[key].alias is not None:\n continue\n config[key] = copy.deepcopy(getattr(self, key))\n return config", + "docstring": "Export a dictionary of current configuration keys and values. This function is design to provide a single point which handles accessing config options and exporting them into a dictionary. This is used by a number of different user facing export methods which all have slightly different semantics re: how and what to skip. If a config is aliased, it skips this config. Arguments: ignored_keys are keys that should not be exported. ignored_prefixes are prefixes that if a key matches should not be exported skip_default does two things. One if a key has not been modified it skips it.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\_config_module.py", + "ast_data": "FunctionDef name:_get_dict arg:self arg:ignored_keys arg:ignored_prefixes arg:skip_default arguments arg arg arg arg For If BoolOp Compare If If Call Call If BoolOp Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "BatchNorm1d", + "source_code": "class BatchNorm1d(_BatchNorm):\n\n def _check_input_dim(self, input):\n if input.dim() != 2 and input.dim() != 3:\n raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')", + "docstring": "Applies Batch Normalization over a 2D or 3D input. Method described in the paper __ . .. math:: y = \\frac{x - \\mathrm{E}[x]}{\\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta The mean and standard-deviation are calculated per-dimension over the mini-batches and :math: and :math: are learnable parameter vectors of size (where is the number of features or channels of the input). By default, the elements of :math: are set to 1 and the elements of :math: are set to 0. 
At train time in the forward pass, the variance is calculated via the biased estimator, equivalent to `momentumtrack_running_statsmomentum\\hat{x}_\\text{new} = (1 - \\text{momentum}) \\times \\hat{x} + \\text{momentum} \\times x_t\\hat{x}x_tC(N, L)Crunning_meanrunning_var(N, C)(N, C, L)NCL(N, C)(N, C, L)` (same shape as input) Examples:: >>> # With Learnable Parameters >>> m = nn.BatchNorm1d(100) >>> # Without Learnable Parameters >>> m = nn.BatchNorm1d(100, affine=False) >>> input = torch.randn(20, 100) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\batchnorm.py", + "ast_data": "ClassDef name:BatchNorm1d FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If BoolOp Compare Call Compare Call Raise Call Call" + }, + { + "library": "django", + "name": "get_migration_by_prefix", + "source_code": "def get_migration_by_prefix(self, app_label, name_prefix):\n results = []\n for migration_app_label, migration_name in self.disk_migrations:\n if migration_app_label == app_label and migration_name.startswith(name_prefix):\n results.append((migration_app_label, migration_name))\n if len(results) > 1:\n raise AmbiguityError(\"There is more than one migration for '%s' with the prefix '%s'\" % (app_label, name_prefix))\n elif not results:\n raise KeyError(f\"There is no migration for '{app_label}' with the prefix '{name_prefix}'\")\n else:\n return self.disk_migrations[results[0]]", + "docstring": "Return the migration(s) which match the given app label and name_prefix.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\loader.py", + "ast_data": "FunctionDef name:get_migration_by_prefix arg:self arg:app_label arg:name_prefix arguments arg arg arg Assign For If BoolOp Compare Call Call If Compare Call Raise Call If Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_conv_add_extra_inputs_getter_left", + "source_code": "def _conv_add_extra_inputs_getter_left(pattern):\n _, _conv, extra_input = pattern\n return [extra_input]", + "docstring": "get inputs pattern for extra inputs, inputs for root node are assumed to be copied over from root node to the fused node", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py", + "ast_data": "FunctionDef name:_conv_add_extra_inputs_getter_left arg:pattern arguments arg Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_ybound", + "source_code": "def set_ybound(self, lower=None, upper=None, view_margin=None):\n self._set_bound3d(self.get_ybound, self.set_ylim, self.yaxis_inverted, lower, upper, view_margin)", + "docstring": "Set the lower and upper numerical bounds of the y-axis. This method will honor axis inversion regardless of parameter order. It will not change the autoscaling setting (). Parameters ---------- lower, upper : float or None The lower and upper bounds. If *None*, the respective axis bound is not modified. view_margin : float or None The margin to apply to the bounds. If *None*, the margin is handled by . 
See Also -------- get_ybound get_ylim, set_ylim invert_yaxis, yaxis_inverted", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:set_ybound arg:self arg:lower arg:upper arg:view_margin arguments arg arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "ismethod", + "source_code": "def ismethod(object):\n return _inspect.ismethod(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.ismethod.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:ismethod arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "efficientvit_backbone_b2", + "source_code": "def efficientvit_backbone_b2(**kwargs: dict[str, Any]) -> EfficientViTBackbone:\n backbone = EfficientViTBackbone(width_list=[24, 48, 96, 192, 384], depth_list=[1, 3, 4, 4, 6], dim=32, **build_kwargs_from_config(kwargs, EfficientViTBackbone))\n return backbone", + "docstring": "Create EfficientViT B2.", + "type": "function", + "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\backbone.py", + "ast_data": "FunctionDef name:efficientvit_backbone_b2 arguments arg Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "prune_dense_static_sort", + "source_code": "@classmethod\ndef prune_dense_static_sort(cls, original_tensor: torch.Tensor, algorithm='') -> 'SparseSemiStructuredTensor':\n packed, meta, packed_t, meta_t, compressed_swizzled_bitmask = torch._sparse_semi_structured_tile(original_tensor, algorithm=algorithm, use_cutlass=False)\n return cls(original_tensor.shape, packed=packed, meta=meta, packed_t=packed_t, meta_t=meta_t, compressed_swizzled_bitmask=compressed_swizzled_bitmask, requires_grad=False)", + "docstring": "This function does the same thing as described in SparseSemiStructuredCUTLASS, but uses the cuSPASRELt metadata layout and sparse matmul. The only functional difference is that cuSPARSELt stores and together into a single tensor. [9 1 7 4] [9 0 7 0] [1 2 3 0] [0 2 0 0] [8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to cuSPARSELT semi-structured -> packed [1 2 6 2] [0 0 6 2] -> pack to transposed cuSPARSELt -> packed_t semi-structured representation -> compute swizzled bitmask -> compressed_swizzled_bitmask The equivalent PyTorch code to create the same three outputs from the dense tensor can be found below:", + "type": "method", + "file_path": "pytorch\\torch\\sparse\\semi_structured.py", + "ast_data": "FunctionDef name:prune_dense_static_sort arg:cls arg:original_tensor arg:algorithm arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "Ipython", + "source_code": "@cli.cls_cmd('ipython')\nclass Ipython(Python):\n ctx = CONTEXT\n pythonpath = Python.pythonpath\n\n @classmethod\n def run(cls, pythonpath, **kwargs):\n cls._setup(pythonpath, **kwargs)\n import IPython\n IPython.embed(user_ns={})", + "docstring": ":wrench: Start IPython shell with PYTHONPATH set. Running is equivalent to: 1. Execute build command (skip by passing the global option). 2. Set the PYTHONPATH environment variable (query with ). 3. 
Run the interpreter.", + "type": "class", + "file_path": "scipy\\dev.py", + "ast_data": "ClassDef name:Ipython Assign Assign FunctionDef name:run arg:cls arg:pythonpath arguments arg arg arg Call Call Call" + }, + { + "library": "sphinx", + "name": "clear_doc", + "source_code": "def clear_doc(self, docname: str) -> None:\n pass", + "docstring": "Remove traces of a document in the domain-specific inventories.", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\__init__.py", + "ast_data": "FunctionDef name:clear_doc arg:self arg:docname arguments arg arg" + }, + { + "library": "matplotlib", + "name": "_get_font", + "source_code": "def _get_font(self, prop):\n filenames = _fontManager._find_fonts_by_props(prop)\n font = get_font(filenames)\n font.set_size(self.FONT_SCALE, self.DPI)\n return font", + "docstring": "Find the matching font properties *prop*, with its size set.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\textpath.py", + "ast_data": "FunctionDef name:_get_font arg:self arg:prop arguments arg arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "StateSpaceContinuous", + "source_code": "class StateSpaceContinuous(StateSpace, lti):\n\n def to_discrete(self, dt, method='zoh', alpha=None):\n return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D), dt, method=method, alpha=alpha)[:-1], dt=dt)", + "docstring": "Continuous-time Linear Time Invariant system in state-space form. Represents the system as the continuous-time, first order differential equation :math:. Continuous-time systems inherit additional functionality from the class. Parameters ---------- *system: arguments The class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: system: (, or ) * 4: array_like: (A, B, C, D) See Also -------- TransferFunction, ZerosPolesGain, lti ss2zpk, ss2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the system representation (such as or ) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call `` before accessing/changing the zeros, poles or gain. 
Examples -------- >>> import numpy as np >>> from scipy import signal >>> a = np.array([[0, 1], [0, 0]]) >>> b = np.array([[0], [1]]) >>> c = np.array([[1, 0]]) >>> d = np.array([[0]]) >>> sys = signal.StateSpace(a, b, c, d) >>> print(sys) StateSpaceContinuous( array([[0, 1], [0, 0]]), array([[0], [1]]), array([[1, 0]]), array([[0]]), dt: None )", + "type": "class", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "ClassDef name:StateSpaceContinuous FunctionDef name:to_discrete arg:self arg:dt arg:method arg:alpha arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "currentframe", + "source_code": "def currentframe():\n return _inspect.stack()[1][0]", + "docstring": "TFDecorator-aware replacement for inspect.currentframe.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:currentframe arguments Return return:yes Call" + }, + { + "library": "scipy", + "name": "HolderTable", + "source_code": "class HolderTable(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.global_optimum = [(8.055023472141116, 9.664590028909654), (-8.055023472141116, 9.664590028909654), (8.055023472141116, -9.664590028909654), (-8.055023472141116, -9.664590028909654)]\n self.fglob = -19.20850256788675\n\n def fun(self, x, *args):\n self.nfev += 1\n return -abs(sin(x[0]) * cos(x[1]) * exp(abs(1 - sqrt(x[0] ** 2 + x[1] ** 2) / pi)))", + "docstring": "HolderTable objective function. This class defines the HolderTable [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{HolderTable}}({x}) = - \\left|{e^{\\left|{1 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi} }\\right|} \\sin\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right| with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Jamil #146 equation is wrong - should be squaring the x1 and x2 terms, but isn't. Gavana does.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_H.py", + "ast_data": "ClassDef name:HolderTable FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call Call" + }, + { + "library": "scipy", + "name": "CloughTocherInterpolatorSubclass", + "source_code": "class CloughTocherInterpolatorSubclass(Benchmark):\n param_names = ['n_samples']\n params = [10, 50, 100]\n\n def setup(self, n_samples):\n rng = np.random.default_rng(314159)\n x = rng.random(n_samples) - 0.5\n y = rng.random(n_samples) - 0.5\n self.z = np.hypot(x, y)\n X = np.linspace(min(x), max(x))\n Y = np.linspace(min(y), max(y))\n self.X, self.Y = np.meshgrid(X, Y)\n self.interp = CloughTocherInterpolatorValues(list(zip(x, y)), (self.X, self.Y))\n\n def time_clough_tocher(self, n_samples):\n self.interp(self.z)", + "docstring": "Benchmark CloughTocherInterpolatorValues. 
Derived from the docstring example,", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py", + "ast_data": "ClassDef name:CloughTocherInterpolatorSubclass Assign Assign FunctionDef name:setup arg:self arg:n_samples arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call Assign Call Call Call FunctionDef name:time_clough_tocher arg:self arg:n_samples arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "AveragePooling2D", + "source_code": "class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer):\n\n def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n if strides is None:\n raise ValueError('Argument `strides` must not be None.')\n super(AveragePooling2D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", + "docstring": "Average pooling layer for 2D inputs (e.g. images). Args: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. (default) and are supported. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py", + "ast_data": "ClassDef name:AveragePooling2D FunctionDef name:__init__ arg:self arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg arg If Compare Raise Call Call Call" + }, + { + "library": "scipy", + "name": "_assemble_sparse_jacobian", + "source_code": "def _assemble_sparse_jacobian(self, J_eq, J_ineq, s):\n n_vars, n_ineq, n_eq = (self.n_vars, self.n_ineq, self.n_eq)\n J_aux = sps.vstack([J_eq, J_ineq], 'csr')\n indptr, indices, data = (J_aux.indptr, J_aux.indices, J_aux.data)\n new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int), np.arange(n_ineq + 1, dtype=int)))\n size = indices.size + n_ineq\n new_indices = np.empty(size)\n new_data = np.empty(size)\n mask = np.full(size, False, bool)\n mask[new_indptr[-n_ineq:] - 1] = True\n new_indices[mask] = n_vars + np.arange(n_ineq)\n new_indices[~mask] = indices\n new_data[mask] = s\n new_data[~mask] = data\n J = sps.csr_array((new_data, new_indices, new_indptr), (n_eq + n_ineq, n_vars + n_ineq))\n return J", + "docstring": "Assemble sparse Jacobian given its components. 
Given `` returns: jacobian = [ J_eq, 0 ] [ J_ineq, diag(s) ] It is equivalent to: sps.bmat([[ J_eq, None ], [ J_ineq, diag(s) ]], \"csr\") but significantly more efficient for this given structure.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py", + "ast_data": "FunctionDef name:_assemble_sparse_jacobian arg:self arg:J_eq arg:J_ineq arg:s arguments arg arg arg arg Assign Assign Call Assign Assign Call Call Call Assign Assign Call Assign Call Assign Call Assign Assign Call Assign Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "normalize_to_torch_size", + "source_code": "def normalize_to_torch_size(size) -> torch.Size:\n if isinstance(size, torch.Size):\n return size\n if isinstance(size, int):\n torch_size = [size]\n elif len(size) == 1 and isinstance(size[0], Sequence):\n torch_size = list(size[0])\n else:\n torch_size = list(size)\n return torch.Size(torch_size)", + "docstring": "Unify variable types of size argument to torch.Size Acceptable types include: int, Sequence[int], Tuple[int], Tuple[Sequence[int]], or torch.Size", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_utils.py", + "ast_data": "FunctionDef name:normalize_to_torch_size arg:size arguments arg If Call Return return:yes If Call Assign If BoolOp Compare Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_select_best_index", + "source_code": "@staticmethod\ndef _select_best_index(refit, refit_metric, results):\n last_iter = np.max(results['iter'])\n last_iter_indices = np.flatnonzero(results['iter'] == last_iter)\n test_scores = results['mean_test_score'][last_iter_indices]\n if np.isnan(test_scores).all():\n best_idx = 0\n else:\n best_idx = np.nanargmax(test_scores)\n return last_iter_indices[best_idx]", + "docstring": "Custom refit callable to return the index of the best candidate. We want the best candidate out of the last iteration. By default BaseSearchCV would return the best candidate out of all iterations. 
Currently, we only support for a single metric thus and are not required.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_search_successive_halving.py", + "ast_data": "FunctionDef name:_select_best_index arg:refit arg:refit_metric arg:results arguments arg arg arg Assign Call Assign Call Compare Assign If Call Call Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_assign_stablehlo_quantization_config_or_populate_default", + "source_code": "def _assign_stablehlo_quantization_config_or_populate_default(self, args):\n if self.experimental_stablehlo_quantizer_config is not None and Optimize.DEFAULT not in self.optimizations:\n args['quantization_config'] = self.experimental_stablehlo_quantizer_config\n elif Optimize.DEFAULT in self.optimizations and self.representative_dataset:\n if len(self._saved_model_exported_names) != 1:\n raise ValueError('StableHLO quantizer is only supported when converting from a SavedModel with one signature key.')\n signature_key = self._saved_model_exported_names[0]\n tfrecord_file_path = tempfile.mkstemp(suffix='.tfrecord', prefix=signature_key)[1]\n rd.TfRecordRepresentativeDatasetSaver({signature_key: tfrecord_file_path}).save({signature_key: self.representative_dataset()})\n quantization_config = qc.QuantizationConfig(static_range_ptq_preset=qc.StaticRangePtqPreset(representative_datasets=[qc.RepresentativeDatasetConfig(tf_record=qc.TfRecordFile(path=tfrecord_file_path))], enable_per_channel_quantized_weight=True, enable_full_int_quantization=True), pipeline_config=qc.PipelineConfig(unpack_quantized_types=False))\n args['quantization_config'] = quantization_config\n else:\n raise ValueError('StableHLO quantizer only supports static-range and weight-only PTQ.')", + "docstring": "Assigns to or populate default. Args: args: Dictionary of argument names and associated values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:_assign_stablehlo_quantization_config_or_populate_default arg:self arg:args arguments arg arg If BoolOp Compare Compare Assign If BoolOp Compare If Compare Call Raise Call Assign Assign Call Call Call Call Assign Call Call Call Call Call Assign Raise Call" + }, + { + "library": "django", + "name": "CaseInsensitiveMixin", + "source_code": "class CaseInsensitiveMixin:\n\n def process_lhs(self, compiler, connection):\n lhs, lhs_params = super().process_lhs(compiler, connection)\n if connection.vendor == 'mysql':\n return ('LOWER(%s)' % lhs, lhs_params)\n return (lhs, lhs_params)\n\n def process_rhs(self, compiler, connection):\n rhs, rhs_params = super().process_rhs(compiler, connection)\n if connection.vendor == 'mysql':\n return ('LOWER(%s)' % rhs, rhs_params)\n return (rhs, rhs_params)", + "docstring": "Mixin to allow case-insensitive comparison of JSON values on MySQL. MySQL handles strings used in JSON context using the utf8mb4_bin collation. 
Because utf8mb4_bin is a binary collation, comparison of JSON values is case-sensitive.", + "type": "class", + "file_path": "django\\django\\db\\models\\fields\\json.py", + "ast_data": "ClassDef name:CaseInsensitiveMixin FunctionDef name:process_lhs arg:self arg:compiler arg:connection arguments arg arg arg Assign Call Call If Compare Return return:yes Return return:yes FunctionDef name:process_rhs arg:self arg:compiler arg:connection arguments arg arg arg Assign Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "transform_feature", + "source_code": "def transform_feature(self, transformation_cache, state_manager):\n input_tensor = _to_sparse_input_and_drop_ignore_values(transformation_cache.get(self.key, state_manager))\n return self._transform_input_tensor(input_tensor, state_manager)", + "docstring": "Creates a lookup table for the vocabulary list.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "fx", + "source_code": "@property\ndef fx(self) -> Tensor:\n return self._params[..., 0]", + "docstring": "Returns the focal length in x direction.", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "FunctionDef name:fx arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_arg_x_as_expected", + "source_code": "def _arg_x_as_expected(value):\n value = np.asarray(value, order='C', dtype=np.float64)\n if value.ndim != 1:\n raise ValueError('`x` must be a 1-D array')\n return value", + "docstring": "Ensure argument is a 1-D C-contiguous array of dtype('float64'). Used in , and to make compatible with the signature of the wrapped Cython functions. Returns ------- value : ndarray A 1-D C-contiguous array with dtype('float64').", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_peak_finding.py", + "ast_data": "FunctionDef name:_arg_x_as_expected arg:value arguments arg Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "top_k_categorical_accuracy", + "source_code": "@dispatch.add_dispatch_support\ndef top_k_categorical_accuracy(y_true, y_pred, k=5):\n return math_ops.cast(nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), backend.floatx())", + "docstring": "Computes how often targets are in the top predictions. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3) >>> assert m.shape == (2,) >>> m.numpy() array([1., 1.], dtype=float32) Args: y_true: The ground truth values. y_pred: The prediction values. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. 
Returns: Top K categorical accuracy value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:top_k_categorical_accuracy arg:y_true arg:y_pred arg:k arguments arg arg arg Return return:yes Call Call Call Call" + }, + { + "library": "pytorch", + "name": "_skipped_use_sharded_views", + "source_code": "@property\ndef _skipped_use_sharded_views(self) -> bool:\n return self._unsharded_flat_param_for_skipped_views is not None", + "docstring": "This property is used for sharding strategies that do not free after forward with ``.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:_skipped_use_sharded_views arg:self arguments arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "from_dict", + "source_code": "@classmethod\ndef from_dict(cls, backend_config_dict: dict[str, Any]) -> BackendConfig:\n conf = cls(backend_config_dict.get(NAME_DICT_KEY, ''))\n for d in backend_config_dict.get(CONFIGS_DICT_KEY, []):\n if isinstance(d, BackendPatternConfig):\n conf.set_backend_pattern_config(d)\n elif isinstance(d, dict):\n conf.set_backend_pattern_config(BackendPatternConfig.from_dict(d))\n else:\n raise ValueError(f\"Expected backend_config_dict['{CONFIGS_DICT_KEY}'] to be a dictionary\")\n return conf", + "docstring": "Create a `BackendPatternConfig`", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", + "ast_data": "FunctionDef name:from_dict arg:cls arg:backend_config_dict arguments arg arg Assign Call Call For Call If Call Call If Call Call Call Raise Call Return return:yes" + }, + { + "library": "numpy", + "name": "getbufsize", + "source_code": "@set_module('numpy')\ndef getbufsize():\n return _get_extobj_dict()['bufsize']", + "docstring": "Return the size of the buffer used in ufuncs. Returns ------- getbufsize : int Size of ufunc buffer in bytes. Examples -------- >>> import numpy as np >>> np.getbufsize() 8192", + "type": "function", + "file_path": "numpy\\numpy\\_core\\_ufunc_config.py", + "ast_data": "FunctionDef name:getbufsize arguments Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "put", + "source_code": "def put(self, indices, values, mode='raise'):\n if self._hardmask and self._mask is not nomask:\n mask = self._mask[indices]\n indices = narray(indices, copy=None)\n values = narray(values, copy=None, subok=True)\n values.resize(indices.shape)\n indices = indices[~mask]\n values = values[~mask]\n self._data.put(indices, values, mode=mode)\n if self._mask is nomask and getmask(values) is nomask:\n return\n m = getmaskarray(self)\n if getmask(values) is nomask:\n m.put(indices, False, mode=mode)\n else:\n m.put(indices, values._mask, mode=mode)\n m = make_mask(m, copy=False, shrink=True)\n self._mask = m\n return", + "docstring": "Set storage-indexed locations to corresponding values. Sets self._data.flat[n] = values[n] for each n in indices. If is shorter than then it will repeat. If has some masked values, the initial mask is updated in consequence, else the corresponding values are unmasked. Parameters ---------- indices : 1-D array_like Target indices, interpreted as integers. values : array_like Values to place in self._data copy at target indices. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. 'raise' : raise an error. 'wrap' : wrap around. 'clip' : clip to the range. 
Notes ----- can be a scalar or length 1 array. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.put([0,4,8],[10,20,30]) >>> x masked_array( data=[[10, --, 3], [--, 20, --], [7, --, 30]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.put(4,999) >>> x masked_array( data=[[10, --, 3], [--, 999, --], [7, --, 30]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999)", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:put arg:self arg:indices arg:values arg:mode arguments arg arg arg arg If BoolOp Compare Assign Assign Call Assign Call Call Assign Assign Call If BoolOp Compare Compare Call Return return:no Assign Call If Compare Call Call Call Assign Call Assign Return return:no" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, profile_datum_list, time_unit=cli_shared.TIME_UNIT_US):\n self._profile_datum_list = profile_datum_list\n self.formatted_start_time = [datum.start_time for datum in profile_datum_list]\n self.formatted_op_time = [cli_shared.time_to_readable_str(datum.op_time, force_time_unit=time_unit) for datum in profile_datum_list]\n self.formatted_exec_time = [cli_shared.time_to_readable_str(datum.node_exec_stats.all_end_rel_micros, force_time_unit=time_unit) for datum in profile_datum_list]\n self._column_names = ['Node', 'Op Type', 'Start Time (us)', 'Op Time (%s)' % time_unit, 'Exec Time (%s)' % time_unit, 'Filename:Lineno(function)']\n self._column_sort_ids = [SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE, SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME, SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]", + "docstring": "Constructor. Args: profile_datum_list: List of objects. 
time_unit: must be in cli_shared.TIME_UNITS.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:profile_datum_list arg:time_unit arguments arg arg arg Assign Assign Assign Call Assign Call Assign Assign" + }, + { + "library": "pytorch", + "name": "_clear_cache", + "source_code": "def _clear_cache(self) -> None:\n self._partition_parameters_cache.clear()\n self._param_to_rank_cache.clear()\n self._index_to_param_cache.clear()\n self._param_to_index_cache.clear()\n self._device_to_params_per_rank_cache.clear()\n self._bucket_assignments_per_rank_cache.clear()", + "docstring": "Clear the cached data structures giving partition information.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py", + "ast_data": "FunctionDef name:_clear_cache arg:self arguments arg Call Call Call Call Call Call" + }, + { + "library": "kornia", + "name": "_pair_square_euclidean", + "source_code": "def _pair_square_euclidean(tensor1: torch.Tensor, tensor2: torch.Tensor) -> torch.Tensor:\n t1_sq: torch.Tensor = tensor1.mul(tensor1).sum(dim=-1, keepdim=True)\n t2_sq: torch.Tensor = tensor2.mul(tensor2).sum(dim=-1, keepdim=True).transpose(1, 2)\n t1_t2: torch.Tensor = tensor1.matmul(tensor2.transpose(1, 2))\n square_dist: torch.Tensor = -2 * t1_t2 + t1_sq + t2_sq\n square_dist = square_dist.clamp(min=0)\n return square_dist", + "docstring": "Compute the pairwise squared euclidean distance matrices :math: between two tensors. Tensors with shapes (B, N, C) and (B, M, C).", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\thin_plate_spline.py", + "ast_data": "FunctionDef name:_pair_square_euclidean arg:tensor1 arg:tensor2 arguments arg arg Call Call Call Call Call Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ListFetchMapper", + "source_code": "class _ListFetchMapper(_FetchMapper):\n\n def __init__(self, fetches):\n if isinstance(fetches, wrapt.ObjectProxy):\n self._fetch_type = type(fetches.__wrapped__)\n else:\n self._fetch_type = type(fetches)\n self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]\n self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)\n\n def unique_fetches(self):\n return self._unique_fetches\n\n def build_results(self, values):\n results = []\n for m, vi in zip(self._mappers, self._value_indices):\n results.append(m.build_results([values[j] for j in vi]))\n if issubclass(self._fetch_type, list):\n return results\n elif self._fetch_type == tuple:\n return tuple(results)\n else:\n return self._fetch_type(*results)", + "docstring": "Fetch mapper for lists, tuples, and namedtuples.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "ClassDef name:_ListFetchMapper FunctionDef name:__init__ arg:self arg:fetches arguments arg arg If Call Assign Call Assign Call Assign Call Assign Call FunctionDef name:unique_fetches arg:self arguments arg Return return:yes FunctionDef name:build_results arg:self arg:values arguments arg arg Assign For Call Call Call If Call Return return:yes If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_is_guaranteed_const", + "source_code": "def _is_guaranteed_const(tensor):\n if isinstance(tensor, ops.EagerTensor):\n return False\n\n class Work(object):\n\n def __init__(self, op: ops.Operation, leaving):\n 
self.op = op\n self.leaving = leaving\n is_guaranteed_const = lambda op: op.node_def.op == 'GuaranteeConst'\n constants = set([])\n\n def all_inputs_const(op: ops.Operation):\n return op.inputs and all((inp.op in constants for inp in op.inputs))\n visited = set([])\n stack = [Work(tensor.op, leaving=False)]\n while stack:\n work = stack.pop()\n if work.leaving:\n if all_inputs_const(work.op):\n constants.add(work.op)\n continue\n visited.add(work.op)\n if is_guaranteed_const(work.op):\n constants.add(work.op)\n continue\n stack.append(Work(work.op, leaving=True))\n for inp in work.op.inputs:\n if inp.op not in visited:\n stack.append(Work(inp.op, leaving=False))\n return tensor.op in constants", + "docstring": "Determines whether is guaranteed to be a constant. A tensor is guaranteed to be a constant if either it was produced by a op or if all of its children are guaranteed to be constants. Args: tensor: The tensor for which to determine const-ness. Returns: True if is guaranteed to be a constant, False otherwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py", + "ast_data": "FunctionDef name:_is_guaranteed_const arg:tensor arguments arg If Call Return return:yes ClassDef name:Work FunctionDef name:__init__ arg:self arg:op arg:leaving arguments arg arg arg Assign Assign Assign arguments arg Compare Assign Call FunctionDef name:all_inputs_const arg:op arguments arg Return return:yes BoolOp Call Compare Assign Call Assign Call While Assign Call If If Call Call Call If Call Call Call Call For If Compare Call Call Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "get_siblings", + "source_code": "def get_siblings(self, a):\n return self._grouper.get_siblings(a)", + "docstring": "Return all of the items joined with *a*, including itself.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:get_siblings arg:self arg:a arguments arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "get_terminal_width", + "source_code": "def get_terminal_width() -> int:\n return shutil.get_terminal_size().columns - 1", + "docstring": "Return the width of the terminal in columns.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\console.py", + "ast_data": "FunctionDef name:get_terminal_width arguments Return return:yes Call" + }, + { + "library": "pandas", + "name": "__arrow_array__", + "source_code": "def __arrow_array__(self, type=None):\n import pyarrow as pa\n if type is None:\n type = pa.string()\n values = self._ndarray.copy()\n values[self.isna()] = None\n return pa.array(values, type=type, from_pandas=True)", + "docstring": "Convert myself into a pyarrow Array.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\string_.py", + "ast_data": "FunctionDef name:__arrow_array__ arg:self arg:type arguments arg arg If Compare Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_StridedSliceGrad", + "source_code": "@ops.RegisterGradient('StridedSlice')\ndef _StridedSliceGrad(op: ops.Operation, grad):\n begin = op.inputs[1]\n end = op.inputs[2]\n strides = op.inputs[3]\n x = array_ops.shape(op.inputs[0], out_type=begin.dtype)\n x_static = tensor_util.constant_value(x)\n x = x_static if x_static is not None else x\n begin_static = tensor_util.constant_value(begin)\n begin = begin_static if begin_static is not None else begin\n end_static = tensor_util.constant_value(end)\n end = end_static 
if end_static is not None else end\n strides_static = tensor_util.constant_value(strides)\n strides = strides_static if strides_static is not None else strides\n return (array_ops.strided_slice_grad(x, begin, end, strides, grad, begin_mask=op.get_attr('begin_mask'), end_mask=op.get_attr('end_mask'), ellipsis_mask=op.get_attr('ellipsis_mask'), new_axis_mask=op.get_attr('new_axis_mask'), shrink_axis_mask=op.get_attr('shrink_axis_mask')), None, None, None)", + "docstring": "Gradient for StridedSlice op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py", + "ast_data": "FunctionDef name:_StridedSliceGrad arg:op arg:grad arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Compare Assign Call Assign Compare Assign Call Assign Compare Assign Call Assign Compare Return return:yes Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_get_ops_from_ops_list", + "source_code": "def _get_ops_from_ops_list(input_file):\n ops = set()\n ops_list_str = gfile.GFile(input_file, 'r').read()\n if not ops_list_str:\n raise Exception('Input file should not be empty')\n ops_list = json.loads(ops_list_str)\n for op, kernel in ops_list:\n op_and_kernel = (op, kernel if kernel else None)\n ops.add(op_and_kernel)\n return ops", + "docstring": "Gets the ops and kernels needed from the ops list file.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\selective_registration_header_lib.py", + "ast_data": "FunctionDef name:_get_ops_from_ops_list arg:input_file arguments arg Assign Call Assign Call Call If Raise Call Assign Call For Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_accumulator_dtype", + "source_code": "def get_accumulator_dtype(input_torch_dtypes: list[torch.dtype]) -> Optional[torch.dtype]:\n if len(input_torch_dtypes) != 2:\n return None\n torch_dtype = None\n if input_torch_dtypes[0] == input_torch_dtypes[1]:\n torch_dtype = input_torch_dtypes[0]\n else:\n size0 = torch.tensor([], dtype=input_torch_dtypes[0]).element_size()\n size1 = torch.tensor([], dtype=input_torch_dtypes[1]).element_size()\n if size0 > size1:\n dtype0, dtype1 = input_torch_dtypes\n else:\n dtype1, dtype0 = input_torch_dtypes\n if dtype0 in [torch.half, torch.bfloat16] and dtype1 in [torch.int8, torch.uint8]:\n torch_dtype = dtype0\n if torch_dtype in (torch.float16, torch.bfloat16, torch.float, torch.float8_e4m3fn):\n return torch.float\n if torch_dtype == torch.int8:\n return torch.int32\n raise NotImplementedError(f'Unsupported data types: input_torch_dtypes={input_torch_dtypes!r}')", + "docstring": "Given a pair of input torch dtypes, returns the inferred accumulator torch dtype.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cutlass_utils.py", + "ast_data": "FunctionDef name:get_accumulator_dtype arg:input_torch_dtypes arguments arg If Compare Call Return return:no Assign If Compare Assign Assign Call Call Assign Call Call If Compare Assign Assign If BoolOp Compare Compare Assign If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "pop_tape", + "source_code": "def pop_tape(tape):\n pywrap_tfe.TFE_Py_TapeSetRemove(tape._tape)", + "docstring": "Pops the given tape in the stack.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py", + "ast_data": "FunctionDef name:pop_tape arg:tape arguments arg Call" + }, + { + "library": "scipy", + "name": "kelvin_zeros", 
+ "source_code": "def kelvin_zeros(nt):\n if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n raise ValueError('nt must be positive integer scalar.')\n return (_specfun.klvnzo(nt, 1), _specfun.klvnzo(nt, 2), _specfun.klvnzo(nt, 3), _specfun.klvnzo(nt, 4), _specfun.klvnzo(nt, 5), _specfun.klvnzo(nt, 6), _specfun.klvnzo(nt, 7), _specfun.klvnzo(nt, 8))", + "docstring": "Compute nt zeros of all Kelvin functions. Returned in a length-8 tuple of arrays of length nt. The tuple contains the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei'). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:kelvin_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call Call Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "has_free_symbols", + "source_code": "def has_free_symbols(val: IterateExprs) -> bool:\n return not all((e.is_number or e.is_Boolean for e in _iterate_exprs(val)))", + "docstring": "Faster version of bool(free_symbols(val))", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:has_free_symbols arg:val arguments arg Return return:yes Call BoolOp Call" + }, + { + "library": "pandas", + "name": "_shallow_copy", + "source_code": "def _shallow_copy(self, values, name: Hashable=no_default) -> Self:\n name = self._name if name is no_default else name\n return self._simple_new(values, name=name, refs=self._references)", + "docstring": "Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_shallow_copy arg:self arg:values arg:name arguments arg arg arg Assign Compare Return return:yes Call" + }, + { + "library": "kornia", + "name": "normalize_points_with_intrinsics", + "source_code": "def normalize_points_with_intrinsics(point_2d: Tensor, camera_matrix: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(point_2d, ['*', '2'])\n KORNIA_CHECK_SHAPE(camera_matrix, ['*', '3', '3'])\n cxcy = camera_matrix[..., :2, 2]\n fxfy = camera_matrix[..., :2, :2].diagonal(dim1=-2, dim2=-1)\n if len(cxcy.shape) < len(point_2d.shape):\n cxcy, fxfy = (cxcy.unsqueeze(-2), fxfy.unsqueeze(-2))\n xy = (point_2d - cxcy) / fxfy\n return xy", + "docstring": "Normalize points with intrinsics. Useful for conversion of keypoints to be used with essential matrix. Args: point_2d: tensor containing the 2d points in the image pixel coordinates. The shape of the tensor can be :math:. camera_matrix: tensor containing the intrinsics camera matrix. The tensor shape must be :math:. Returns: tensor of (u, v) cam coordinates with shape :math:. 
Example: >>> _ = torch.manual_seed(0) >>> X = torch.rand(1, 2) >>> K = torch.eye(3)[None] >>> normalize_points_with_intrinsics(X, K) tensor([[0.4963, 0.7682]])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:normalize_points_with_intrinsics arg:point_2d arg:camera_matrix arguments arg arg Call Call Assign Assign Call If Compare Call Call Assign Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "name_scope", + "source_code": "@tf_contextlib.contextmanager\ndef name_scope(self, name) -> Iterator[str]:\n if name:\n if isinstance(name, compat.bytes_or_text_types):\n name = compat.as_str(name)\n if self._name_stack:\n if not _VALID_SCOPE_NAME_REGEX.match(name):\n raise ValueError(f\"'{name}' is not a valid scope name. A scope name has to match the following pattern: {_VALID_SCOPE_NAME_REGEX.pattern}\")\n elif not _VALID_OP_NAME_REGEX.match(name):\n raise ValueError(f\"'{name}' is not a valid root scope name. A root scope name has to match the following pattern: {_VALID_OP_NAME_REGEX.pattern}\")\n old_stack = self._name_stack\n if not name:\n new_stack = ''\n returned_scope = ''\n elif name[-1] == '/':\n new_stack = name_from_scope_name(name)\n returned_scope = name\n else:\n new_stack = self.unique_name(name)\n returned_scope = new_stack + '/'\n self._name_stack = new_stack\n try:\n yield returned_scope\n finally:\n self._name_stack = old_stack", + "docstring": "Returns a context manager that creates hierarchical names for operations. A graph maintains a stack of name scopes. A statement pushes a new name onto the stack for the lifetime of the context. The argument will be interpreted as follows: * A string (not ending with '/') will create a new name scope, in which is appended to the prefix of all operations created in the context. If has been used before, it will be made unique by calling . * A scope previously captured from a statement will be treated as an \"absolute\" name scope, which makes it possible to re-enter existing scopes. * A value of or the empty string will reset the current name scope to the top-level (empty) name scope. For example: The name of the scope itself can be captured by , which stores the name of the scope in the variable . This value can be used to name an operation that represents the overall result of executing the ops in a scope. For example: NOTE: This constructor validates the given . Valid scope names match one of the following regular expressions: [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root) [A-Za-z0-9_.\\-/]* (for other scopes) Args: name: A name for the scope. Returns: A context manager that installs as a new name scope. 
Raises: ValueError: If is not a valid scope name, according to the rules above.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:name_scope arg:self arg:name arguments arg arg If If Call Assign Call If If Call Raise Call If Call Raise Call Assign If Assign Assign If Compare Assign Call Assign Assign Call Assign Assign Try Assign" + }, + { + "library": "django", + "name": "_plural_string", + "source_code": "@property\ndef _plural_string(self):\n if '' in self.translation._catalog:\n for line in self.translation._catalog[''].split('\\n'):\n if line.startswith('Plural-Forms:'):\n return line.split(':', 1)[1].strip()\n return None", + "docstring": "Return the plural string (including nplurals) for this catalog language, or None if no plural string is available.", + "type": "method", + "file_path": "django\\django\\views\\i18n.py", + "ast_data": "FunctionDef name:_plural_string arg:self arguments arg If Compare For Call If Call Return return:yes Call Call Return return:no" + }, + { + "library": "matplotlib", + "name": "AsinhScale", + "source_code": "class AsinhScale(ScaleBase):\n name = 'asinh'\n auto_tick_multipliers = {3: (2,), 4: (2,), 5: (2,), 8: (2, 4), 10: (2, 5), 16: (2, 4, 8), 64: (4, 16), 1024: (256, 512)}\n\n def __init__(self, axis, *, linear_width=1.0, base=10, subs='auto', **kwargs):\n super().__init__(axis)\n self._transform = AsinhTransform(linear_width)\n self._base = int(base)\n if subs == 'auto':\n self._subs = self.auto_tick_multipliers.get(self._base)\n else:\n self._subs = subs\n linear_width = property(lambda self: self._transform.linear_width)\n\n def get_transform(self):\n return self._transform\n\n def set_default_locators_and_formatters(self, axis):\n axis.set(major_locator=AsinhLocator(self.linear_width, base=self._base), minor_locator=AsinhLocator(self.linear_width, base=self._base, subs=self._subs), minor_formatter=NullFormatter())\n if self._base > 1:\n axis.set_major_formatter(LogFormatterSciNotation(self._base))\n else:\n axis.set_major_formatter('{x:.3g}')", + "docstring": "A quasi-logarithmic scale based on the inverse hyperbolic sine (asinh) For values close to zero, this is essentially a linear scale, but for large magnitude values (either positive or negative) it is asymptotically logarithmic. The transition between these linear and logarithmic regimes is smooth, and has no discontinuities in the function gradient in contrast to the (\"symlog\") scale. Specifically, the transformation of an axis coordinate :math: is :math: where :math: is the effective width of the linear region of the transformation. In that region, the transformation is :math:. For large values of :math: the transformation behaves as :math:. .. 
note:: This API is provisional and may be revised in the future based on early user feedback.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "ClassDef name:AsinhScale Assign Assign FunctionDef name:__init__ arg:self arg:axis arguments arg arg arg arg arg arg Call Call Assign Call Assign Call If Compare Assign Call Assign Assign Call arguments arg FunctionDef name:get_transform arg:self arguments arg Return return:yes FunctionDef name:set_default_locators_and_formatters arg:self arg:axis arguments arg arg Call Call Call Call If Compare Call Call Call" + }, + { + "library": "django", + "name": "migration_plan", + "source_code": "def migration_plan(self, targets, clean_start=False):\n plan = []\n if clean_start:\n applied = {}\n else:\n applied = dict(self.loader.applied_migrations)\n for target in targets:\n if target[1] is None:\n for root in self.loader.graph.root_nodes():\n if root[0] == target[0]:\n for migration in self.loader.graph.backwards_plan(root):\n if migration in applied:\n plan.append((self.loader.graph.nodes[migration], True))\n applied.pop(migration)\n elif self.loader.replace_migrations and target not in self.loader.graph.node_map:\n self.loader.replace_migrations = False\n self.loader.build_graph()\n return self.migration_plan(targets, clean_start=clean_start)\n elif target in applied:\n next_in_app = sorted((n for n in self.loader.graph.node_map[target].children if n[0] == target[0]))\n for node in next_in_app:\n for migration in self.loader.graph.backwards_plan(node):\n if migration in applied:\n plan.append((self.loader.graph.nodes[migration], True))\n applied.pop(migration)\n else:\n for migration in self.loader.graph.forwards_plan(target):\n if migration not in applied:\n plan.append((self.loader.graph.nodes[migration], False))\n applied[migration] = self.loader.graph.nodes[migration]\n return plan", + "docstring": "Given a set of targets, return a list of (Migration instance, backwards?).", + "type": "method", + "file_path": "django\\django\\db\\migrations\\executor.py", + "ast_data": "FunctionDef name:migration_plan arg:self arg:targets arg:clean_start arguments arg arg arg Assign If Assign Assign Call For If Compare For Call If Compare For Call If Compare Call Call If BoolOp Compare Assign Call Return return:yes Call If Compare Assign Call Compare For For Call If Compare Call Call For Call If Compare Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "true_fn", + "source_code": "def true_fn(control_inputs, body_pfor, body_output, stacked):\n converted_control_inp = []\n for x in control_inputs:\n for t in x.outputs:\n converted_control_inp.append(body_pfor._convert_helper(t).t)\n if stacked:\n output = body_pfor.convert(body_output)\n else:\n output, convert_stacked, _ = body_pfor._convert_helper(body_output)\n assert convert_stacked == stacked, body_output\n with ops.control_dependencies(converted_control_inp):\n return array_ops.identity(output)", + "docstring": "Converts the body function for all but last iteration. This essentially converts body_output. Additionally, it needs to handle any control dependencies on the NextIteration node. 
So it creates another Identity node with the converted dependencies.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:true_fn arg:control_inputs arg:body_pfor arg:body_output arg:stacked arguments arg arg arg arg Assign For For Call Call If Assign Call Assign Call Compare With Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "pad", + "source_code": "def pad(self, padding_size: Tensor) -> Boxes:\n if not (len(padding_size.shape) == 2 and padding_size.size(1) == 4):\n raise RuntimeError(f'Expected padding_size as (B, 4). Got {padding_size.shape}.')\n self._data[..., 0] += padding_size[..., None, :1].to(device=self._data.device)\n self._data[..., 1] += padding_size[..., None, 2:3].to(device=self._data.device)\n return self", + "docstring": "Pad a bounding box. Args: padding_size: (B, 4)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\boxes.py", + "ast_data": "FunctionDef name:pad arg:self arg:padding_size arguments arg arg If BoolOp Compare Call Compare Call Raise Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "depth_from_point", + "source_code": "def depth_from_point(R: Tensor, t: Tensor, X: Tensor) -> Tensor:\n X_tmp = R @ X.transpose(-2, -1)\n X_out = X_tmp[..., 2, :] + t[..., 2, :]\n return X_out", + "docstring": "Return the depth of a point transformed by a rigid transform. Args: R: The rotation matrix with shape :math:. t: The translation vector with shape :math:. X: The 3d points with shape :math:. Returns: The depth value per point with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py", + "ast_data": "FunctionDef name:depth_from_point arg:R arg:t arg:X arguments arg arg arg Assign Call Assign Return return:yes" + }, + { + "library": "seaborn", + "name": "_default_values", + "source_code": "def _default_values(self, n: int) -> list[MarkerStyle]:\n markers = ['o', 'X', (4, 0, 45), 'P', (4, 0, 0), (4, 1, 0), '^', (4, 1, 45), 'v']\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n s += 1\n markers = [MarkerStyle(m) for m in markers[:n]]\n return markers", + "docstring": "Build an arbitrarily long list of unique marker styles. Parameters ---------- n : int Number of unique marker specs to generate. Returns ------- markers : list of string or tuples Values for defining :class: objects. 
All markers will be filled.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\properties.py", + "ast_data": "FunctionDef name:_default_values arg:self arg:n arguments arg arg Assign Assign While Compare Call Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "choose", + "source_code": "def choose(indices, choices, out=None, mode='raise'):\n\n def fmask(x):\n if x is masked:\n return True\n return filled(x)\n\n def nmask(x):\n if x is masked:\n return True\n return getmask(x)\n c = filled(indices, 0)\n masks = [nmask(x) for x in choices]\n data = [fmask(x) for x in choices]\n outputmask = np.choose(c, masks, mode=mode)\n outputmask = make_mask(mask_or(outputmask, getmask(indices)), copy=False, shrink=True)\n d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)\n if out is not None:\n if isinstance(out, MaskedArray):\n out.__setmask__(outputmask)\n return out\n d.__setmask__(outputmask)\n return d", + "docstring": "Use an index array to construct a new array from a list of choices. Given an array of integers and a list of n choice arrays, this method will create a new array that merges each of the choice arrays. Where a value in is i, the new array will have the value that choices[i] contains in the same place. Parameters ---------- indices : ndarray of ints This array must contain integers in `dtype`. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' : raise an error * 'wrap' : wrap around * 'clip' : clip to the range Returns ------- merged_array : array See Also -------- choose : equivalent function Examples -------- >>> import numpy as np >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) >>> a = np.array([2, 1, 0]) >>> np.ma.choose(a, choice) masked_array(data=[3, 2, 1], mask=False, fill_value=999999)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:choose arg:indices arg:choices arg:out arg:mode arguments arg arg arg arg FunctionDef name:fmask arg:x arguments arg If Compare Return return:yes Return return:yes Call FunctionDef name:nmask arg:x arguments arg If Compare Return return:yes Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call If Compare If Call Call Return return:yes Call Return return:yes" + }, + { + "library": "pandas", + "name": "num_chunks", + "source_code": "def num_chunks(self) -> int:\n return 1", + "docstring": "Return the number of chunks the column consists of.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\column.py", + "ast_data": "FunctionDef name:num_chunks arg:self arguments arg Return return:yes" + }, + { + "library": "scrapy", + "name": "verify_url_scheme", + "source_code": "def verify_url_scheme(url: str) -> str:\n parsed = urlparse(url)\n if parsed.scheme == '' and parsed.netloc == '':\n parsed = urlparse('//' + url)._replace(scheme='https')\n return parsed.geturl()", + "docstring": "Check url for scheme and insert https if none found.", + "type": "function", + "file_path": "scrapy\\scrapy\\commands\\genspider.py", + "ast_data": "FunctionDef name:verify_url_scheme arg:url arguments arg Assign Call If BoolOp Compare Compare Assign Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "check_models_ready", + "source_code": "def check_models_ready(self):\n if not self.models_ready:\n raise AppRegistryNotReady(\"Models aren't loaded yet.\")", + "docstring": "Raise an exception if all models haven't been 
imported yet.", + "type": "method", + "file_path": "django\\django\\apps\\registry.py", + "ast_data": "FunctionDef name:check_models_ready arg:self arguments arg If Raise Call" + }, + { + "library": "tensorflow", + "name": "hann_window", + "source_code": "@tf_export('signal.hann_window')\n@dispatch.add_dispatch_support\ndef hann_window(window_length, periodic=True, dtype=dtypes.float32, name=None):\n return _raised_cosine_window(name, 'hann_window', window_length, periodic, dtype, 0.5, 0.5)", + "docstring": "Generate a [Hann window][hann]. Args: window_length: A scalar indicating the window length to generate. periodic: A bool indicating whether to generate a periodic or symmetric window. Periodic windows are typically used for spectral analysis while symmetric windows are typically used for digital filter design. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A of shape of type . Raises: ValueError: If is not a floating point type. [hann]:", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\window_ops.py", + "ast_data": "FunctionDef name:hann_window arg:window_length arg:periodic arg:dtype arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "halt_ordering", + "source_code": "@deprecated('`halt_ordering` is deprecated, you can safely remove this call.', category=FutureWarning)\ndef halt_ordering():\n pass", + "docstring": "Deprecated interface to temporarily disable ordering.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py", + "ast_data": "FunctionDef name:halt_ordering arguments Call" + }, + { + "library": "pytorch", + "name": "_default_alldims", + "source_code": "def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> list[int]:\n if dim is None:\n return list(range(x.ndim))\n elif not isinstance(dim, Sequence):\n return [dim]\n else:\n return list(dim)", + "docstring": "Convert Optional[DimsType] to a simple list, defaulting to all dimensions", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\fft.py", + "ast_data": "FunctionDef name:_default_alldims arg:dim arg:x arguments arg arg If Compare Return return:yes Call Call If Call Return return:yes Return return:yes Call" + }, + { + "library": "kornia", + "name": "piecewise_arange", + "source_code": "def piecewise_arange(piecewise_idxer: Tensor) -> Tensor:\n dv = piecewise_idxer.device\n uni: Tensor\n uni, counts = torch.unique_consecutive(piecewise_idxer, return_counts=True)\n maxcnt = int(torch.max(counts).item())\n numuni = uni.shape[0]\n tmp = torch.zeros(size=(numuni, maxcnt), device=dv).bool()\n ranges = torch.arange(maxcnt, device=dv).unsqueeze(0).expand(numuni, -1)\n tmp[ranges < counts.unsqueeze(-1)] = True\n return ranges[tmp]", + "docstring": "Count repeated indices. Example: [0, 0, 0, 3, 3, 3, 3, 1, 1, 2] -> [0, 1, 2, 0, 1, 2, 3, 0, 1, 0]", + "type": "function", + "file_path": "kornia\\kornia\\feature\\adalam\\utils.py", + "ast_data": "FunctionDef name:piecewise_arange arg:piecewise_idxer arguments arg Assign Assign Call Assign Call Call Call Assign Assign Call Call Assign Call Call Call Assign Compare Call Return return:yes" + }, + { + "library": "sphinx", + "name": "init", + "source_code": "def init(self) -> None:\n pass", + "docstring": "Load necessary templates and perform initialization. 
The default implementation does nothing.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\__init__.py", + "ast_data": "FunctionDef name:init arg:self arguments arg" + }, + { + "library": "authlib", + "name": "validate_claims_locales_supported", + "source_code": "def validate_claims_locales_supported(self):\n validate_array_value(self, 'claims_locales_supported')", + "docstring": "OPTIONAL. Languages and scripts supported for values in Claims being returned, represented as a JSON array of BCP47 [RFC5646] language tag values. Not all languages and scripts are necessarily supported for all Claim values.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", + "ast_data": "FunctionDef name:validate_claims_locales_supported arg:self arguments arg Call" + }, + { + "library": "django", + "name": "__getstate__", + "source_code": "def __getstate__(self):\n state = self.__dict__.copy()\n state['_state'] = copy.copy(state['_state'])\n state['_state'].fields_cache = state['_state'].fields_cache.copy()\n _memoryview_attrs = []\n for attr, value in state.items():\n if isinstance(value, memoryview):\n _memoryview_attrs.append((attr, bytes(value)))\n if _memoryview_attrs:\n state['_memoryview_attrs'] = _memoryview_attrs\n for attr, value in _memoryview_attrs:\n state.pop(attr)\n return state", + "docstring": "Hook to allow choosing the attributes to pickle.", + "type": "method", + "file_path": "django\\django\\db\\models\\base.py", + "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Call Assign Call Assign Call Assign For Call If Call Call Call If Assign For Call Return return:yes" + }, + { + "library": "pandas", + "name": "_write", + "source_code": "def _write(self, to_write: str) -> None:\n self.handles.handle.write(to_write.encode(self._encoding))", + "docstring": "Helper to call encode before writing to file for Python 3 compat.", + "type": "method", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:_write arg:self arg:to_write arguments arg arg Call Call" + }, + { + "library": "numpy", + "name": "get_names_flat", + "source_code": "def get_names_flat(adtype):\n listnames = []\n names = adtype.names\n for name in names:\n listnames.append(name)\n current = adtype[name]\n if current.names is not None:\n listnames.extend(get_names_flat(current))\n return tuple(listnames)", + "docstring": "Returns the field names of the input datatype as a tuple. Input datatype must have fields otherwise error is raised. Nested structure are flattened beforehand. 
Parameters ---------- adtype : dtype Input datatype Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None False >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names_flat(adtype) ('a', 'b', 'ba', 'bb')", + "type": "function", + "file_path": "numpy\\numpy\\lib\\recfunctions.py", + "ast_data": "FunctionDef name:get_names_flat arg:adtype arguments arg Assign Assign For Call Assign If Compare Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "unicode_split_with_offsets", + "source_code": "@tf_export('strings.unicode_split_with_offsets')\n@dispatch.add_dispatch_support\ndef unicode_split_with_offsets(input, input_encoding, errors='replace', replacement_char=65533, name=None):\n with ops.name_scope(name, 'UnicodeSplitWithOffsets', [input]):\n codepoints, offsets = _unicode_decode(input, input_encoding, errors, replacement_char, False, with_offsets=True)\n chars = unicode_encode(ragged_array_ops.expand_dims(codepoints, -1), output_encoding=input_encoding, errors=errors, replacement_char=replacement_char)\n return (chars, offsets)", + "docstring": "Splits each string into a sequence of code points with start offsets. This op is similar to , but it also returns the start offset for each character in its respective string. This information can be used to align the characters with the original byte sequence. Returns a tuple where: * is the substring of that encodes its th character, when decoded using . * is the start byte offset for the th character in , when decoded using . Args: input: An dimensional potentially ragged tensor with shape . must be statically known. input_encoding: String name for the unicode encoding that should be used to decode each string. errors: Specifies the response when an input string can't be converted using the indicated encoding. One of: * : Raise an exception for any illegal substrings. * : Replace illegal substrings with . * : Skip illegal substrings. replacement_char: The replacement codepoint to be used in place of invalid substrings in when . name: A name for the operation (optional). Returns: A tuple of dimensional tensors . * is an tensor with shape . * is an tensor with shape . The returned tensors are s if is a scalar, or s otherwise. 
#### Example: >>> input = [s.encode('utf8') for s in (u'G\\xf6\\xf6dnight', u'\\U0001f60a')] >>> result = tf.strings.unicode_split_with_offsets(input, 'UTF-8') >>> result[0].to_list() # character substrings [[b'G', b'\\xc3\\xb6', b'\\xc3\\xb6', b'd', b'n', b'i', b'g', b'h', b't'], [b'\\xf0\\x9f\\x98\\x8a']] >>> result[1].to_list() # offsets [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_string_ops.py", + "ast_data": "FunctionDef name:unicode_split_with_offsets arg:input arg:input_encoding arg:errors arg:replacement_char arg:name arguments arg arg arg arg arg With Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_DefaultReplicaContext", + "source_code": "class _DefaultReplicaContext(ReplicaContext):\n\n @property\n def replica_id_in_sync_group(self):\n return 0", + "docstring": "ReplicaContext for _DefaultDistributionStrategy.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "ClassDef name:_DefaultReplicaContext FunctionDef name:replica_id_in_sync_group arg:self arguments arg Return return:yes" + }, + { + "library": "uvicorn", + "name": "message_with_placeholders", + "source_code": "def message_with_placeholders(message: Any) -> Any:\n new_message = message.copy()\n for attr in PLACEHOLDER_FORMAT.keys():\n if message.get(attr) is not None:\n content = message[attr]\n placeholder = PLACEHOLDER_FORMAT[attr].format(length=len(content))\n new_message[attr] = placeholder\n return new_message", + "docstring": "Return an ASGI message, with any body-type content omitted and replaced with a placeholder.", + "type": "function", + "file_path": "uvicorn\\uvicorn\\middleware\\message_logger.py", + "ast_data": "FunctionDef name:message_with_placeholders arg:message arguments arg Assign Call For Call If Compare Call Assign Assign Call Call Assign Return return:yes" + }, + { + "library": "kornia", + "name": "warp_grid3d", + "source_code": "def warp_grid3d(grid: Tensor, src_homo_dst: Tensor) -> Tensor:\n batch_size: int = src_homo_dst.size(0)\n _, depth, height, width, _ = grid.size()\n grid = grid.expand(batch_size, -1, -1, -1, -1)\n if len(src_homo_dst.shape) == 3:\n src_homo_dst = src_homo_dst.view(batch_size, 1, 4, 4)\n flow: Tensor = transform_points(src_homo_dst, grid.to(src_homo_dst))\n return flow.view(batch_size, depth, height, width, 3)", + "docstring": "Compute the grid to warp the coordinates grid by the homography/ies. Args: grid: Unwrapped grid of the shape :math:. src_homo_dst: Homography or homographies (stacked) to transform all points in the grid. Shape of the homography has to be :math: or :math:. 
Returns: the transformed grid of shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py", + "ast_data": "FunctionDef name:warp_grid3d arg:grid arg:src_homo_dst arguments arg arg Call Assign Call Assign Call If Compare Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "download_reference", + "source_code": "class download_reference(nodes.reference):\n pass", + "docstring": "Node for download references, similar to pending_xref.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:download_reference" + }, + { + "library": "tensorflow", + "name": "LayerAttributes", + "source_code": "class LayerAttributes(SerializedAttributes.with_attributes('LayerAttributes', checkpointable_objects=['non_trainable_variables', 'layers', 'metrics', 'layer_regularization_losses', 'layer_metrics'], functions=['call_and_return_conditional_losses', 'activity_regularizer_fn'], copy_from=[CommonEndpoints])):\n pass", + "docstring": "Layer checkpointable objects + functions that are saved to the SavedModel. List of all attributes: All attributes from CommonEndpoints non_trainable_variables: List of non-trainable variables in the layer and its sublayers. layers: List of all sublayers. metrics: List of all metrics in the layer and its sublayers. call_and_return_conditional_losses: Function that takes inputs and returns a tuple of (outputs of the call function, list of input-dependent losses). The list of losses excludes the activity regularizer function, which is separate to allow the deserialized Layer object to define a different activity regularizer. activity_regularizer_fn: Callable that returns the activity regularizer loss layer_regularization_losses: List of losses owned only by this layer. layer_metrics: List of metrics owned by this layer.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py", + "ast_data": "ClassDef name:LayerAttributes Call" + }, + { + "library": "pytorch", + "name": "AutogradStateOpsFailSafeguard", + "source_code": "class AutogradStateOpsFailSafeguard(TorchFunctionMode):\n\n def __torch_function__(self, func, types, args=(), kwargs=None):\n kwargs = kwargs or {}\n unsupported_grad_mode_ops = [torch._C._set_grad_enabled]\n current_state = torch._C.is_grad_enabled()\n if func in unsupported_grad_mode_ops:\n assert len(args) == 1\n changed_state = args[0]\n mode = torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY)\n if mode and isinstance(mode, ProxyTorchDispatchMode) and (not mode.pre_dispatch) and (changed_state != current_state):\n raise RuntimeError(f\"Encountered autograd state manager op {func} trying to change global autograd state while exporting. This is unsafe because we don't capture this op in torch.export today, hence we can't reflect the user intention soundly. You can fix this by adding a torch.no_grad() context around the export call.\")\n return func(*args, **kwargs)", + "docstring": "Detect grad state ops during exporting the graph and fail the process by raising an error, to avoid unexpected behavior. 
Those grad mode ops could be: Export with predispatch mode is exempted.", + "type": "class", + "file_path": "pytorch\\torch\\export\\_safeguard.py", + "ast_data": "ClassDef name:AutogradStateOpsFailSafeguard FunctionDef name:__torch_function__ arg:self arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg Assign BoolOp Assign Assign Call If Compare Compare Call Assign Assign Call If BoolOp Call Compare Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_fix_pts_xy_order", + "source_code": "def _fix_pts_xy_order(self, pts):\n return pts[:, ::-1] if self.t_direction == 'y' else pts", + "docstring": "Fix pts calculation results with . In the workflow, it is assumed that is 'x'. If this is not true, we need to exchange the coordinates.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:_fix_pts_xy_order arg:self arg:pts arguments arg arg Return return:yes Compare" + }, + { + "library": "scipy", + "name": "splrep", + "source_code": "def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None, full_output=0, per=0, quiet=1):\n res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)\n return res", + "docstring": "Find the B-spline representation of a 1-D curve. .. legacy:: function Specifically, we recommend using in new code. Given the set of data points `. >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.interpolate import splev, splrep >>> x = np.linspace(0, 10, 10) >>> y = np.sin(x) >>> spl = splrep(x, y) >>> x2 = np.linspace(0, 10, 200) >>> y2 = splev(x2, spl) >>> plt.plot(x, y, 'o', x2, y2) >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_fitpack_py.py", + "ast_data": "FunctionDef name:splrep arg:x arg:y arg:w arg:xb arg:xe arg:k arg:task arg:s arg:t arg:full_output arg:per arg:quiet arguments arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "EmptyDataError", + "source_code": "class EmptyDataError(ValueError):\n pass", + "docstring": "Exception raised in `` when empty data or header is encountered. This error is typically encountered when attempting to read an empty file or an invalid file where no data or headers are present. See Also -------- read_csv : Read a comma-separated values (CSV) file into DataFrame. errors.ParserError : Exception that is raised by an error encountered in parsing file contents. errors.DtypeWarning : Warning raised when reading different dtypes in a column from a file. 
Examples -------- >>> from io import StringIO >>> empty = StringIO() >>> pd.read_csv(empty) Traceback (most recent call last): EmptyDataError: No columns to parse from file", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:EmptyDataError" + }, + { + "library": "scikit-learn", + "name": "_count_nonzero", + "source_code": "def _count_nonzero(X, axis=None, sample_weight=None, xp=None, device=None):\n from .sparsefuncs import count_nonzero\n xp, _ = get_namespace(X, sample_weight, xp=xp)\n if _is_numpy_namespace(xp) and sp.issparse(X):\n return count_nonzero(X, axis=axis, sample_weight=sample_weight)\n assert X.ndim == 2\n weights = xp.ones_like(X, device=device)\n if sample_weight is not None:\n sample_weight = xp.asarray(sample_weight, device=device)\n sample_weight = xp.reshape(sample_weight, (sample_weight.shape[0], 1))\n weights = xp.astype(weights, sample_weight.dtype) * sample_weight\n zero_scalar = xp.asarray(0, device=device, dtype=weights.dtype)\n return xp.sum(xp.where(X != 0, weights, zero_scalar), axis=axis)", + "docstring": "A variant of for the Array API. If the array is sparse, and we are using the numpy namespace then we simply call the original function. This function only supports 2D arrays.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", + "ast_data": "FunctionDef name:_count_nonzero arg:X arg:axis arg:sample_weight arg:xp arg:device arguments arg arg arg arg arg Assign Call If BoolOp Call Call Return return:yes Call Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call Compare" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, grid_helper, side, nth_coord_ticks=None):\n super().__init__(loc=side)\n self.grid_helper = grid_helper\n if nth_coord_ticks is None:\n nth_coord_ticks = self.nth_coord\n self.nth_coord_ticks = nth_coord_ticks\n self.side = side", + "docstring": "nth_coord = along which coordinate value varies. 
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_helper_curvelinear.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:grid_helper arg:side arg:nth_coord_ticks arguments arg arg arg arg Call Call Assign If Compare Assign Assign Assign" + }, + { + "library": "numpy", + "name": "Methods0DFloatInt", + "source_code": "class Methods0DFloatInt(Benchmark):\n params = [['__int__', '__float__'], [dt for dt in TYPES1 if not dt.startswith('complex')]]\n param_names = ['methods', 'npdtypes']\n timeout = 10\n\n def setup(self, methname, npdtypes):\n self.xarg = np.array(3, dtype=npdtypes)\n\n def time_ndarray__0d__(self, methname, npdtypes):\n meth = getattr(self.xarg, methname)\n meth()", + "docstring": "Zero dimension array methods", + "type": "class", + "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py", + "ast_data": "ClassDef name:Methods0DFloatInt Assign Call Assign Assign FunctionDef name:setup arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call FunctionDef name:time_ndarray__0d__ arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call Call" + }, + { + "library": "kornia", + "name": "mask_pts_at_padded_regions", + "source_code": "@torch.no_grad()\ndef mask_pts_at_padded_regions(grid_pt: Tensor, mask: Tensor) -> Tensor:\n n, h, w = mask.shape\n mask = mask.reshape(n, h * w).unsqueeze(-1).repeat(1, 1, 2)\n grid_pt[~mask.bool()] = 0\n return grid_pt", + "docstring": "For megadepth dataset, zero-padding exists in images.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\loftr\\utils\\supervision.py", + "ast_data": "FunctionDef name:mask_pts_at_padded_regions arg:grid_pt arg:mask arguments arg arg Assign Assign Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "size", + "source_code": "def size(self):\n return stat(self.__name).length", + "docstring": "Returns the size of the file.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "evaluate_sym_node", + "source_code": "def evaluate_sym_node(self, sym_node: SymNode, size_oblivious: bool=False, fallback_value: Optional[bool]=None) -> sympy.Basic:\n self._expr_sym_node_id = id(sym_node)\n return self.evaluate_expr(sym_node.expr, sym_node.hint, sym_node.fx_node, size_oblivious, fallback_value=fallback_value)", + "docstring": "Given a a SymNode, evaluates sym_node.expr, adding guards if necessary.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:evaluate_sym_node arg:self arg:sym_node arg:size_oblivious arg:fallback_value arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_all_saveable_objects", + "source_code": "def _all_saveable_objects(scope=None):\n return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope) + ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope)", + "docstring": "Returns all variables and s that must be checkpointed. Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied. The choice of means that a without special tokens filters by prefix. 
Returns: A list of and to be checkpointed", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:_all_saveable_objects arg:scope arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "latest_post_date", + "source_code": "def latest_post_date(self):\n latest_date = None\n date_keys = ('updateddate', 'pubdate')\n for item in self.items:\n for date_key in date_keys:\n item_date = item.get(date_key)\n if item_date:\n if latest_date is None or item_date > latest_date:\n latest_date = item_date\n return latest_date or datetime.datetime.now(tz=datetime.UTC)", + "docstring": "Return the latest item's pubdate or updateddate. If no items have either of these attributes this return the current UTC date/time.", + "type": "method", + "file_path": "django\\django\\utils\\feedgenerator.py", + "ast_data": "FunctionDef name:latest_post_date arg:self arguments arg Assign Assign For For Assign Call If If BoolOp Compare Compare Assign Return return:yes BoolOp Call" + }, + { + "library": "numpy", + "name": "_recursive_printoption", + "source_code": "def _recursive_printoption(result, mask, printopt):\n names = result.dtype.names\n if names is not None:\n for name in names:\n curdata = result[name]\n curmask = mask[name]\n _recursive_printoption(curdata, curmask, printopt)\n else:\n np.copyto(result, printopt, where=mask)", + "docstring": "Puts printoptions in result where mask is True. Private function allowing for recursion", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:_recursive_printoption arg:result arg:mask arg:printopt arguments arg arg arg Assign If Compare For Assign Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_tag_callable", + "source_code": "def _tag_callable(loss):\n if callable(loss):\n with autocast_variable.enable_auto_cast_variables(None):\n loss = loss()\n if loss is None:\n return None\n if not tensor_util.is_tf_type(loss):\n loss = tensor_conversion.convert_to_tensor_v2_with_dispatch(loss, dtype=backend.floatx())\n loss._unconditional_loss = True\n return loss", + "docstring": "Tags callable loss tensor as .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:_tag_callable arg:loss arguments arg If Call With Call Assign Call If Compare Return return:no If Call Assign Call Call Assign Return return:yes" + }, + { + "library": "cryptography", + "name": "__copy__", + "source_code": "@abc.abstractmethod\ndef __copy__(self) -> X25519PublicKey:\n pass", + "docstring": "Returns a copy.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py", + "ast_data": "FunctionDef name:__copy__ arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "infer_inputs_from_restored_call_function", + "source_code": "def infer_inputs_from_restored_call_function(fn):\n\n def common_spec(x, y):\n common_shape = get_common_shape(x.shape, y.shape)\n if isinstance(x, sparse_tensor.SparseTensorSpec):\n return sparse_tensor.SparseTensorSpec(common_shape, x.dtype)\n elif isinstance(x, ragged_tensor.RaggedTensorSpec):\n return ragged_tensor.RaggedTensorSpec(common_shape, x.dtype)\n return tensor_spec.TensorSpec(common_shape, x.dtype, x.name)\n spec = fn.concrete_functions[0].structured_input_signature[0][0]\n for concrete in fn.concrete_functions[1:]:\n spec2 = concrete.structured_input_signature[0][0]\n 
spec = nest.map_structure(common_spec, spec, spec2)\n return spec", + "docstring": "Returns TensorSpec of inputs from a restored call function. Args: fn: Restored layer call function. It is assumed that has at least one concrete function and that the inputs are in the first argument. Returns: TensorSpec of call function inputs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", + "ast_data": "FunctionDef name:infer_inputs_from_restored_call_function arg:fn arguments arg FunctionDef name:common_spec arg:x arg:y arguments arg arg Assign Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call Assign For Assign Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "encode_data", + "source_code": "def encode_data(self, data, attributes):\n current_row = 0\n for inst in data:\n if len(inst) != len(attributes):\n raise BadObject('Instance %d has %d attributes, expected %d' % (current_row, len(inst), len(attributes)))\n new_data = []\n for value in inst:\n if value is None or value == '' or value != value:\n s = '?'\n else:\n s = encode_string(str(value))\n new_data.append(s)\n current_row += 1\n yield ','.join(new_data)", + "docstring": "(INTERNAL) Encodes a line of data. Data instances follow the csv format, i.e, attribute values are delimited by commas. After converted from csv. :param data: a list of values. :param attributes: a list of attributes. Used to check if data is valid. :return: a string with the encoded data line.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\_arff.py", + "ast_data": "FunctionDef name:encode_data arg:self arg:data arg:attributes arguments arg arg arg Assign For If Compare Call Call Raise Call Call Call Assign For If BoolOp Compare Compare Compare Assign Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_model_proto", + "source_code": "@tf_export('data.experimental.get_model_proto')\ndef get_model_proto(iterator) -> model_pb2.ModelProto:\n if isinstance(iterator, iterator_ops.OwnedIterator):\n iterator_resource = iterator._iterator_resource\n elif isinstance(iterator, dataset_ops.NumpyIterator):\n iterator_resource = iterator._iterator._iterator_resource\n else:\n raise ValueError('Only supports `tf.data.Iterator`-typed `iterator`.')\n if not context.executing_eagerly():\n raise ValueError(f'{get_model_proto.__name__} is not supported in graph mode.')\n model_proto_string_tensor = ged_ops.iterator_get_model_proto(iterator_resource)\n model_proto_bytes = model_proto_string_tensor.numpy()\n return model_pb2.ModelProto.FromString(model_proto_bytes)", + "docstring": "Gets the analytical model inside of as . Args: iterator: An or Returns: The model inside of this iterator as a model proto. 
Raises: NotFoundError: If this iterator's autotune is not enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\iterator_model_ops.py", + "ast_data": "FunctionDef name:get_model_proto arg:iterator arguments arg If Call Assign If Call Assign Raise Call If Call Raise Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_Context", + "source_code": "class _Context(object):\n __slots__ = ['_lock', '_group_id']\n\n def __init__(self, lock, group_id):\n self._lock = lock\n self._group_id = group_id\n\n def __enter__(self):\n self._lock.acquire(self._group_id)\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n del type_arg, value_arg, traceback_arg\n self._lock.release(self._group_id)", + "docstring": "Context manager helper for .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\util\\lock_util.py", + "ast_data": "ClassDef name:_Context Assign FunctionDef name:__init__ arg:self arg:lock arg:group_id arguments arg arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg Call FunctionDef name:__exit__ arg:self arg:type_arg arg:value_arg arg:traceback_arg arguments arg arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "BoolGaugeCell", + "source_code": "class BoolGaugeCell(object):\n __slots__ = ['_cell']\n\n def __init__(self, cell):\n self._cell = cell\n\n def set(self, value):\n pywrap_tfe.TFE_MonitoringBoolGaugeCellSet(self._cell, value)\n\n def value(self):\n return pywrap_tfe.TFE_MonitoringBoolGaugeCellValue(self._cell)", + "docstring": "A single boolean value stored in an .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", + "ast_data": "ClassDef name:BoolGaugeCell Assign FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign FunctionDef name:set arg:self arg:value arguments arg arg Call FunctionDef name:value arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "random_contrast", + "source_code": "@tf_export('image.random_contrast')\n@dispatch.add_dispatch_support\ndef random_contrast(image, lower, upper, seed=None):\n if upper <= lower:\n raise ValueError('upper must be > lower.')\n if lower < 0:\n raise ValueError('lower must be non-negative.')\n contrast_factor = random_ops.random_uniform([], lower, upper, seed=seed)\n return adjust_contrast(image, contrast_factor)", + "docstring": "Adjust the contrast of an image or images by a random factor. Equivalent to but uses a randomly picked in the interval . For producing deterministic results given a value, use . Unlike using the param with ops, ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A Python integer. Used to create a random seed. See for behavior. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_contrast(x, 0.2, 0.5) Returns: The contrast-adjusted image(s). 
Raises: ValueError: if or if .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", + "ast_data": "FunctionDef name:random_contrast arg:image arg:lower arg:upper arg:seed arguments arg arg arg arg If Compare Raise Call If Compare Raise Call Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "normalize_keypoints", + "source_code": "@custom_fwd(cast_inputs=torch.float32)\ndef normalize_keypoints(kpts: Tensor, size: Tensor) -> Tensor:\n if isinstance(size, torch.Size):\n size = torch.tensor(size)[None]\n shift = size.float().to(kpts) / 2\n scale = size.max(1).values.float().to(kpts) / 2\n kpts = (kpts - shift[:, None]) / scale[:, None, None]\n return kpts", + "docstring": "Normalize tensor of keypoints.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\lightglue.py", + "ast_data": "FunctionDef name:normalize_keypoints arg:kpts arg:size arguments arg arg If Call Assign Call Assign Call Call Assign Call Call Call Assign Return return:yes Call" + }, + { + "library": "seaborn", + "name": "_get_boolean_mapping", + "source_code": "def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n return mapping", + "docstring": "Identify evenly-spaced values using interval or explicit mapping.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\properties.py", + "ast_data": "FunctionDef name:_get_boolean_mapping arg:self arg:scale arg:data arguments arg arg arg Assign Call FunctionDef name:mapping arg:x arguments arg Assign Call Call Assign Call Assign Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_step", + "source_code": "def _step(time, output_ta_t, prev_output, *states):\n current_input = tuple((ta.read(time) for ta in input_ta))\n current_input = nest.pack_sequence_as(inputs, current_input)\n mask_t = masking_fn(time)\n output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n flat_output = nest.flatten(output)\n flat_mask_output = flat_zero_output if zero_output_for_mask else nest.flatten(prev_output)\n flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)\n flat_state = nest.flatten(states)\n flat_new_state = nest.flatten(new_states)\n for state, new_state in zip(flat_state, flat_new_state):\n if isinstance(new_state, tensor_lib.Tensor):\n new_state.set_shape(state.shape)\n flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)\n new_states = nest.pack_sequence_as(new_states, flat_final_state)\n output_ta_t = tuple((ta.write(time, out) for ta, out in zip(output_ta_t, flat_new_output)))\n return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)", + "docstring": "RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. prev_output: tuple of outputs from time - 1. *states: List of states. 
Returns: Tuple:", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_step arg:time arg:output_ta_t arg:prev_output arguments arg arg arg arg Assign Call Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call For Call If Call Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "pygame", + "name": "resource_exists", + "source_code": "def resource_exists(_package_or_requirement, _resource_name):\n return False", + "docstring": "A stub for when we fail to import this function. :return: Always returns False", + "type": "function", + "file_path": "pygame\\src_py\\pkgdata.py", + "ast_data": "FunctionDef name:resource_exists arg:_package_or_requirement arg:_resource_name arguments arg arg Return return:yes" + }, + { + "library": "kornia", + "name": "make_samplers", + "source_code": "def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n gain = _range_bound(self.gain, 'gain', device=device, dtype=dtype)\n self.gain_sampler = UniformDistribution(gain[0], gain[1], validate_args=False)\n center = _range_bound(self.center, 'center', device=device, dtype=dtype)\n self.center_sampler = UniformDistribution(center[0], center[1], validate_args=False)\n sigma = _range_bound(self.sigma, 'sigma', device=device, dtype=dtype)\n self.sigma_sampler = UniformDistribution(sigma[0], sigma[1], validate_args=False)\n sign = _range_bound(self.sign, 'sign', bounds=(-1.0, 1.0), center=0.0, device=device, dtype=dtype)\n self.sign_sampler = UniformDistribution(sign[0], sign[1], validate_args=False)", + "docstring": "Create samplers for generating random gaussian illumination parameters.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\gaussian_illumination.py", + "ast_data": "FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "matplotlib", + "name": "set_file", + "source_code": "def set_file(self, file):\n self._file = os.fspath(file) if file is not None else None", + "docstring": "Set the filename of the fontfile to use. In this case, all other properties will be ignored.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", + "ast_data": "FunctionDef name:set_file arg:self arg:file arguments arg arg Assign Compare Call" + }, + { + "library": "tensorflow", + "name": "confusion_matrix_v1", + "source_code": "@tf_export(v1=['math.confusion_matrix', 'confusion_matrix'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('confusion_matrix', 'train.confusion_matrix')\ndef confusion_matrix_v1(labels, predictions, num_classes=None, dtype=dtypes.int32, name=None, weights=None):\n return confusion_matrix(labels, predictions, num_classes, weights, dtype, name)", + "docstring": "Computes the confusion matrix from predictions and labels. The matrix columns represent the prediction labels and the rows represent the real labels. The confusion matrix is always a 2-D array of shape , where is the number of valid labels for a given classification task. Both prediction and labels must be 1-D arrays of the same shape in order for this function to work. If is , then will be set to one plus the maximum value in either predictions or labels. Class labels are expected to start at 0. 
For example, if is 3, then the possible labels would be . If is not , then each prediction contributes its corresponding weight to the total value of the confusion matrix cell. For example: Note that the possible labels are assumed to be , resulting in a 5x5 confusion matrix. Args: labels: 1-D of real labels for the classification task. predictions: 1-D of predictions for a given classification. num_classes: The possible number of labels the classification task can have. If this value is not provided, it will be calculated using both predictions and labels array. dtype: Data type of the confusion matrix. name: Scope name. weights: An optional whose shape matches . Returns: A of type with shape representing the confusion matrix, where is the number of possible labels in the classification task. Raises: ValueError: If both predictions and labels are not 1-D vectors and have mismatched shapes, or if is not and its shape doesn't match .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\confusion_matrix.py", + "ast_data": "FunctionDef name:confusion_matrix_v1 arg:labels arg:predictions arg:num_classes arg:dtype arg:name arg:weights arguments arg arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "parse", + "source_code": "def parse(src, preamble_len=0, single_node=True):\n module_node = gast.parse(src)\n nodes = module_node.body\n if preamble_len:\n nodes = nodes[preamble_len:]\n if single_node:\n if len(nodes) != 1:\n raise ValueError('expected exactly one node, got {}'.format(nodes))\n return nodes[0]\n return nodes", + "docstring": "Returns the AST of given piece of code. Args: src: Text preamble_len: Int, indicates leading nodes in the parsed AST which should be dropped. single_node: Bool, whether is assumed to be represented by exactly one AST node. Returns: ast.AST", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\parser.py", + "ast_data": "FunctionDef name:parse arg:src arg:preamble_len arg:single_node arguments arg arg arg Assign Call Assign If Assign If If Compare Call Raise Call Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "autofmt_xdate", + "source_code": "def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right', which='major'):\n _api.check_in_list(['major', 'minor', 'both'], which=which)\n axes = [ax for ax in self.axes if ax._label != '']\n allsubplots = all((ax.get_subplotspec() for ax in axes))\n if len(axes) == 1:\n for label in self.axes[0].get_xticklabels(which=which):\n label.set_ha(ha)\n label.set_rotation(rotation)\n elif allsubplots:\n for ax in axes:\n if ax.get_subplotspec().is_last_row():\n for label in ax.get_xticklabels(which=which):\n label.set_ha(ha)\n label.set_rotation(rotation)\n else:\n for label in ax.get_xticklabels(which=which):\n label.set_visible(False)\n ax.set_xlabel('')\n engine = self.get_layout_engine()\n if allsubplots and (engine is None or engine.adjust_compatible):\n self.subplots_adjust(bottom=bottom)\n self.stale = True", + "docstring": "Date ticklabels often overlap, so it is useful to rotate them and right align them. Also, a common use case is a number of subplots with shared x-axis where the x-axis is date data. The ticklabels are often long, and it helps to rotate them on the bottom subplot and turn them off on other subplots, as well as turn off xlabels. Parameters ---------- bottom : float, default: 0.2 The bottom of the subplots for . 
rotation : float, default: 30 degrees The rotation angle of the xtick labels in degrees. ha : {'left', 'center', 'right'}, default: 'right' The horizontal alignment of the xticklabels. which : {'major', 'minor', 'both'}, default: 'major' Selects which ticklabels to rotate.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:autofmt_xdate arg:self arg:bottom arg:rotation arg:ha arg:which arguments arg arg arg arg arg Call Assign Compare Assign Call Call If Compare Call For Call Call Call If For If Call Call For Call Call Call For Call Call Call Assign Call If BoolOp BoolOp Compare Call Assign" + }, + { + "library": "scipy", + "name": "logpdf", + "source_code": "@abstractmethod\ndef logpdf(self, x, /, *, method):\n raise NotImplementedError()", + "docstring": "Log of the probability density function The probability density function (\"PDF\"), denoted :math:, is the probability *per unit length* that the random variable will assume the value :math:. Mathematically, it can be defined as the derivative of the cumulative distribution function :math:: .. math:: f(x) = \\frac{d}{dx} F(x) computes the logarithm of the probability density function (\"log-PDF\"), :math:, but it may be numerically favorable compared to the naive implementation (computing :math: and taking the logarithm). accepts for :math:. Parameters ---------- x : array_like The argument of the log-PDF. method : {None, 'formula', 'logexp'} The strategy used to evaluate the log-PDF. By default (`methodmethodx[l, r]-\\infty\\log(0)x r\\log(1) = 0pdflogpdf`) elsewhere. References ---------- .. [1] Probability density function, *Wikipedia*, Examples -------- Instantiate a distribution with the desired parameters: >>> import numpy as np >>> from scipy import stats >>> X = stats.Uniform(a=-1.0, b=1.0) Evaluate the log-PDF at the desired argument: >>> X.logpdf(0.5) -0.6931471805599453 >>> np.allclose(X.logpdf(0.5), np.log(X.pdf(0.5))) True", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_probability_distribution.py", + "ast_data": "FunctionDef name:logpdf arguments arg arg arg Raise Call" + }, + { + "library": "numpy", + "name": "getdoc", + "source_code": "def getdoc(self):\n meth = getattr(MaskedArray, self.__name__, None) or getattr(np, self.__name__, None)\n signature = self.__name__ + get_object_signature(meth)\n if meth is not None:\n doc = f' {signature}\\n{getattr(meth, '__doc__', None)}'\n return doc", + "docstring": "Return the doc of the function (from the doc of the method).", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:getdoc arg:self arguments arg Assign BoolOp Call Call Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_global_batch_size", + "source_code": "@property\ndef _global_batch_size(self):\n return True", + "docstring": "Global and per-replica batching are equivalent for OneDeviceStrategy.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py", + "ast_data": "FunctionDef name:_global_batch_size arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "split", + "source_code": "def split(self) -> tuple[Sequence[Union[message.Message, bytes]], chunk_pb2.ChunkedMessage]:\n if self._parent_splitter:\n raise ValueError(\"A child ComposableSplitter's `split` method should not be called directly, since it inherit chunks from a parent object. 
Please call the parent's `split()` method instead.\")\n assert self._chunks is not None\n assert self._chunked_message is not None\n if not self._built:\n self.build_chunks()\n self._fix_chunks()\n self._built = True\n return (self._chunks, self._chunked_message)", + "docstring": "Splits a proto message into a Sequence of protos/bytes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py", + "ast_data": "FunctionDef name:split arg:self arguments arg If Raise Call Compare Compare If Call Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "_poly1d", + "source_code": "def _poly1d(c_or_r, *, xp):\n c_or_r = xpx.atleast_nd(c_or_r, ndim=1, xp=xp)\n if c_or_r.ndim > 1:\n raise ValueError('Polynomial must be 1d only.')\n c_or_r = _trim_zeros(c_or_r, trim='f')\n if c_or_r.shape[0] == 0:\n c_or_r = xp.asarray([0], dtype=c_or_r.dtype)\n return c_or_r", + "docstring": "Constructor of np.poly1d object from an array of coefficients (r=False)", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_polyutils.py", + "ast_data": "FunctionDef name:_poly1d arg:c_or_r arguments arg arg Assign Call If Compare Raise Call Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, callable, name=None):\n Tool.__init__(self, 'before_handler', callable, name)", + "docstring": "Initialize a handler tool.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptools.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:callable arg:name arguments arg arg arg Call" + }, + { + "library": "pytorch", + "name": "emit_dispatch_call", + "source_code": "def emit_dispatch_call(f: NativeFunction, input_base: str, unpacked_args: Sequence[str]) -> str:\n dispatch_key_set = 'ks & c10::after_autograd_keyset'\n call = CALL_REDISPATCH.substitute(api_name=cpp.name(f.func, faithful_name_for_out_overloads=True, symint_overload=f.func.has_symint()), unpacked_args=[dispatch_key_set] + list(unpacked_args))\n return call", + "docstring": "Dispatch call via function in a namespace or method on Tensor.", + "type": "function", + "file_path": "pytorch\\tools\\autograd\\gen_variable_type.py", + "ast_data": "FunctionDef name:emit_dispatch_call arg:f arg:input_base arg:unpacked_args arguments arg arg arg Assign Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "assert_no_legacy_layers", + "source_code": "def assert_no_legacy_layers(layers):\n legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)]\n if legacy_layers:\n layer_str = '\\n'.join((' ' + str(l) for l in legacy_layers))\n raise TypeError('The following are legacy tf.layers.Layers:\\n{}\\nTo use keras as a framework (for instance using the Network, Model, or Sequential classes), please use the tf.keras.layers implementation instead. (Or, if writing custom layers, subclass from tf.keras.layers rather than tf.layers)'.format(layer_str))", + "docstring": "Prevent tf.layers.Layers from being used with Keras. Certain legacy layers inherit from their keras analogs; however they are not supported with keras and can lead to subtle and hard to diagnose bugs. 
Args: layers: A list of layers to check Raises: TypeError: If any elements of layers are tf.layers.Layers", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py", + "ast_data": "FunctionDef name:assert_no_legacy_layers arg:layers arguments arg Assign Call If Assign Call Call Raise Call Call" + }, + { + "library": "scipy", + "name": "tobanded", + "source_code": "def tobanded(self):\n d0 = np.r_[5, 6 * np.ones(self.n - 2, dtype=self.dtype), 5]\n d1 = -4 * np.ones(self.n, dtype=self.dtype)\n d2 = np.ones(self.n, dtype=self.dtype)\n return np.array([d2, d1, d0]).astype(self.dtype)", + "docstring": "Construct the Sakurai matrix as a banded array.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py", + "ast_data": "FunctionDef name:tobanded arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "_ensure_ndmin_ndarray_check_param", + "source_code": "def _ensure_ndmin_ndarray_check_param(ndmin):\n if ndmin not in [0, 1, 2]:\n raise ValueError(f'Illegal value of ndmin keyword: {ndmin}')", + "docstring": "Just checks if the param ndmin is supported on _ensure_ndmin_ndarray. It is intended to be used as verification before running anything expensive. e.g. loadtxt, genfromtxt", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_npyio_impl.py", + "ast_data": "FunctionDef name:_ensure_ndmin_ndarray_check_param arg:ndmin arguments arg If Compare Raise Call" + }, + { + "library": "scipy", + "name": "get_blas_funcs", + "source_code": "@_memoize_get_funcs\ndef get_blas_funcs(names, arrays=(), dtype=None, ilp64=False):\n if isinstance(ilp64, str):\n if ilp64 == 'preferred':\n ilp64 = HAS_ILP64\n else:\n raise ValueError(\"Invalid value for 'ilp64'\")\n if not ilp64:\n return _get_funcs(names, arrays, dtype, 'BLAS', _fblas, _cblas, 'fblas', 'cblas', _blas_alias, ilp64=False)\n else:\n if not HAS_ILP64:\n raise RuntimeError('BLAS ILP64 routine requested, but Scipy compiled only with 32-bit BLAS')\n return _get_funcs(names, arrays, dtype, 'BLAS', _fblas_64, None, 'fblas_64', None, _blas_alias, ilp64=True)", + "docstring": "Return available BLAS function objects from names. Arrays are used to determine the optimal prefix of BLAS routines. Parameters ---------- names : str or sequence of str Name(s) of BLAS functions without type prefix. arrays : sequence of ndarrays, optional Arrays can be given to determine optimal prefix of BLAS routines. If not given, double-precision routines will be used, otherwise the most generic type in arrays will be used. dtype : str or dtype, optional Data-type specifier. Not used if is non-empty. ilp64 : {True, False, 'preferred'}, optional Whether to return ILP64 routine variant. Choosing 'preferred' returns ILP64 routine if available, and otherwise the 32-bit routine. Default: False Returns ------- funcs : list List containing the found function(s). Notes ----- This routine automatically chooses between Fortran/C interfaces. Fortran code is used whenever possible for arrays with column major order. In all other cases, C code is preferred. In BLAS, the naming convention is that all functions start with a type prefix, which depends on the type of the principal matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy types {float32, float64, complex64, complex128} respectively. The code and the dtype are stored in attributes and of the returned functions. 
Examples -------- >>> import numpy as np >>> import scipy.linalg as LA >>> rng = np.random.default_rng() >>> a = rng.random((3,2)) >>> x_gemv = LA.get_blas_funcs('gemv', (a,)) >>> x_gemv.typecode 'd' >>> x_gemv = LA.get_blas_funcs('gemv',(a*1j,)) >>> x_gemv.typecode 'z'", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\blas.py", + "ast_data": "FunctionDef name:get_blas_funcs arg:names arg:arrays arg:dtype arg:ilp64 arguments arg arg arg arg If Call If Compare Assign Raise Call If Return return:yes Call If Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "linetable_311_writer", + "source_code": "def linetable_311_writer(first_lineno: int):\n assert sys.version_info >= (3, 11)\n linetable = []\n lineno = first_lineno\n\n def update(positions: 'dis.Positions', inst_size):\n nonlocal lineno\n lineno_new = positions.lineno if positions else None\n\n def _update(delta, size):\n assert 0 < size <= 8\n other_varints: tuple[int, ...] = ()\n if positions and positions.lineno is not None and (positions.end_lineno is not None) and (positions.col_offset is not None) and (positions.end_col_offset is not None):\n linetable.append(240 + size - 1)\n other_varints = (positions.end_lineno - positions.lineno, positions.col_offset + 1, positions.end_col_offset + 1)\n else:\n linetable.append(232 + size - 1)\n if delta < 0:\n delta = -delta << 1 | 1\n else:\n delta <<= 1\n linetable.extend(encode_varint(delta))\n for n in other_varints:\n linetable.extend(encode_varint(n))\n if lineno_new is None:\n lineno_delta = 0\n else:\n lineno_delta = lineno_new - lineno\n lineno = lineno_new\n while inst_size > 8:\n _update(lineno_delta, 8)\n inst_size -= 8\n _update(lineno_delta, inst_size)\n return (linetable, update)", + "docstring": "Used to create typing.CodeType.co_linetable See This is the internal format of the line number table for Python 3.11", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:linetable_311_writer arg:first_lineno arguments arg Compare Assign Assign FunctionDef name:update arg:positions arg:inst_size arguments arg arg Assign FunctionDef name:_update arg:delta arg:size arguments arg arg Compare If BoolOp Compare Compare Compare Compare Call Assign Call If Compare Assign Call Call For Call Call If Compare Assign Assign Assign While Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_reduce_graph_module", + "source_code": "def _reduce_graph_module(self, gm: torch.fx.GraphModule) -> tuple[Any, tuple[dict[str, Any], str]]:\n fn, (data, imports) = gm.__reduce__()\n code = data['_code']\n code = re.sub('kernel_idx = \\\\d+', '', code)\n code = re.sub('constant_args_idx = \\\\d+', '', code)\n data['_code'] = code\n return (fn, (data, imports))", + "docstring": "Custom reducer for graph module to handle irrelevant data for user defined triton kernels Essentially what we are doing here is a huge hack where user defined triton kernel contain a dynamo time side table and the arguments to the call_function are indicies into this side table. 
These arguments are not for hashing purposes since we included the source code into the cache key and the numbers are prone to give false negatives due to ordering.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codecache.py", + "ast_data": "FunctionDef name:_reduce_graph_module arg:self arg:gm arguments arg arg Assign Call Assign Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "kornia", + "name": "differentiable_polynomial_rounding", + "source_code": "def differentiable_polynomial_rounding(input: Tensor) -> Tensor:\n input_round = input.round()\n output: Tensor = input_round + (input - input_round) ** 3\n return output", + "docstring": "Differentiable rounding. Args: input (Tensor): Input tensor of any shape to be rounded. Returns: output (Tensor): Pseudo rounded tensor of the same shape as input tensor.", + "type": "function", + "file_path": "kornia\\kornia\\utils\\misc.py", + "ast_data": "FunctionDef name:differentiable_polynomial_rounding arg:input arguments arg Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "SparseCoefMixin", + "source_code": "class SparseCoefMixin:\n\n def densify(self):\n msg = 'Estimator, %(name)s, must be fitted before densifying.'\n check_is_fitted(self, msg=msg)\n if sp.issparse(self.coef_):\n self.coef_ = self.coef_.toarray()\n return self\n\n def sparsify(self):\n msg = 'Estimator, %(name)s, must be fitted before sparsifying.'\n check_is_fitted(self, msg=msg)\n self.coef_ = sp.csr_matrix(self.coef_)\n return self", + "docstring": "Mixin for converting coef_ to and from CSR format. L1-regularizing estimators should inherit this.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py", + "ast_data": "ClassDef name:SparseCoefMixin FunctionDef name:densify arg:self arguments arg Assign Call If Call Assign Call Return return:yes FunctionDef name:sparsify arg:self arguments arg Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_assign_bucket_subset_to_rank", + "source_code": "def _assign_bucket_subset_to_rank(self, bucket_index: int, bucket_params: list[torch.Tensor], bucket_offset: int, assigned_rank: int, assigned_ranks_per_bucket: list[set[int]]) -> None:\n overlap_info = self._overlap_info\n if len(bucket_params) == 0:\n raise ValueError('Empty bucket assignment')\n params_per_rank = overlap_info.params_per_rank\n offsets = overlap_info.offsets\n self._bucket_assignments_per_rank_cache[assigned_rank][bucket_index] = _DDPBucketAssignment(bucket_index, bucket_params, bucket_offset)\n if self.global_rank == assigned_rank:\n offsets[bucket_index] = len(params_per_rank[assigned_rank])\n params_per_rank[assigned_rank].extend(bucket_params)\n assigned_ranks_per_bucket[bucket_index].add(assigned_rank)\n self._overlap_info.num_bucket_assignments += 1", + "docstring": "Assign `DistributedDataParallelDistributedDataParallelset` of group ranks assigned to each bucket.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py", + "ast_data": "FunctionDef name:_assign_bucket_subset_to_rank arg:self arg:bucket_index arg:bucket_params arg:bucket_offset arg:assigned_rank arg:assigned_ranks_per_bucket arguments arg arg arg arg arg arg Assign If Compare Call Raise Call Assign Assign Assign Call If Compare Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "add_notice_to_docstring", + "source_code": "def add_notice_to_docstring(doc, instructions, no_doc_str, suffix_str, notice, 
notice_type='Warning'):\n allowed_notice_types = ['Deprecated', 'Warning', 'Caution', 'Important', 'Note']\n if notice_type not in allowed_notice_types:\n raise ValueError(f'Unrecognized notice type. Should be one of: {allowed_notice_types}')\n if not doc:\n lines = [no_doc_str]\n else:\n lines = _normalize_docstring(doc).splitlines()\n lines[0] += ' ' + suffix_str\n if not notice:\n raise ValueError('The `notice` arg must not be empty.')\n notice[0] = f'{notice_type}: {notice[0]}'\n notice = [''] + notice + ([instructions] if instructions else [])\n if len(lines) > 1:\n if lines[1].strip():\n notice.append('')\n lines[1:1] = notice\n else:\n lines += notice\n return '\\n'.join(lines)", + "docstring": "Adds a deprecation notice to a docstring. Args: doc: The original docstring. instructions: A string, describing how to fix the problem. no_doc_str: The default value to use for if is empty. suffix_str: Is added to the end of the first line. notice: A list of strings. The main notice warning body. notice_type: The type of notice to use. Should be one of Returns: A new docstring, with the notice attached. Raises: ValueError: If is empty.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\decorator_utils.py", + "ast_data": "FunctionDef name:add_notice_to_docstring arg:doc arg:instructions arg:no_doc_str arg:suffix_str arg:notice arg:notice_type arguments arg arg arg arg arg arg Assign If Compare Raise Call If Assign Assign Call Call If Raise Call Assign Assign If Compare Call If Call Call Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "clear", + "source_code": "def clear(self):\n self._mtx = IdentityTransform._mtx.copy()\n self.invalidate()\n return self", + "docstring": "Reset the underlying matrix to the identity transform.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_has_valid_setitem_indexer", + "source_code": "def _has_valid_setitem_indexer(self, indexer) -> bool:\n if isinstance(indexer, dict):\n raise IndexError('iloc cannot enlarge its target object')\n if isinstance(indexer, ABCDataFrame):\n raise TypeError('DataFrame indexer for .iloc is not supported. Consider using .loc with a DataFrame indexer for automatic alignment.')\n if not isinstance(indexer, tuple):\n indexer = _tuplify(self.ndim, indexer)\n for ax, i in zip(self.obj.axes, indexer):\n if isinstance(i, slice):\n pass\n elif is_list_like_indexer(i):\n pass\n elif is_integer(i):\n if i >= len(ax):\n raise IndexError('iloc cannot enlarge its target object')\n elif isinstance(i, dict):\n raise IndexError('iloc cannot enlarge its target object')\n return True", + "docstring": "Validate that a positional indexer cannot enlarge its target will raise if needed, does not modify the indexer externally. 
Returns ------- bool", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:_has_valid_setitem_indexer arg:self arg:indexer arguments arg arg If Call Raise Call If Call Raise Call If Call Assign Call For Call If Call If Call If Call If Compare Call Raise Call If Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "getmethodclass", + "source_code": "def getmethodclass(m):\n if not hasattr(m, '__name__') and hasattr(m, '__class__') and hasattr(m, '__call__'):\n if isinstance(m.__class__, type):\n return m.__class__\n m_self = getattr(m, '__self__', None)\n if m_self is not None:\n if inspect.isclass(m_self):\n return m_self\n return m_self.__class__\n owners = []\n caller_frame = tf_inspect.currentframe().f_back\n try:\n for v in itertools.chain(caller_frame.f_locals.values(), caller_frame.f_globals.values()):\n if hasattr(v, m.__name__):\n candidate = getattr(v, m.__name__)\n if hasattr(candidate, 'im_func'):\n candidate = candidate.im_func\n if hasattr(m, 'im_func'):\n m = m.im_func\n if candidate is m:\n owners.append(v)\n finally:\n del caller_frame\n if owners:\n if len(owners) == 1:\n return owners[0]\n owner_types = tuple((o if tf_inspect.isclass(o) else type(o) for o in owners))\n for o in owner_types:\n if tf_inspect.isclass(o) and issubclass(o, tuple(owner_types)):\n return o\n raise ValueError('Found too many owners of %s: %s' % (m, owners))\n return None", + "docstring": "Resolves a function's owner, e.g. a method's class. Note that this returns the object that the function was retrieved from, not necessarily the class where it was defined. This function relies on Python stack frame support in the interpreter, and has the same limitations that inspect.currentframe. Limitations. This function will only work correctly if the owned class is visible in the caller's global or local variables. Args: m: A user defined function Returns: The class that this function was retrieved from, or None if the function is not an object or class method, or the class that owns the object or method is not visible to m. Raises: ValueError: if the class could not be resolved for any unexpected reason.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py", + "ast_data": "FunctionDef name:getmethodclass arg:m arguments arg If BoolOp Call Call Call If Call Return return:yes Assign Call If Compare If Call Return return:yes Return return:yes Assign Assign Call Try For Call Call Call If Call Assign Call If Call Assign If Call Assign If Compare Call If If Compare Call Return return:yes Assign Call Call Call For If BoolOp Call Call Call Return return:yes Raise Call Return return:no" + }, + { + "library": "pandas", + "name": "column_setitem", + "source_code": "def column_setitem(self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool=False) -> None:\n if not self._has_no_reference(loc):\n blkno = self.blknos[loc]\n blk_loc = self.blklocs[loc]\n values = self.blocks[blkno].values\n if values.ndim == 1:\n values = values.copy()\n else:\n values = values[[blk_loc]]\n self._iset_split_block(blkno, [blk_loc], values)\n col_mgr = self.iget(loc, track_ref=False)\n if inplace_only:\n col_mgr.setitem_inplace(idx, value)\n else:\n new_mgr = col_mgr.setitem((idx,), value)\n self.iset(loc, new_mgr._block.values, inplace=True)", + "docstring": "Set values (\"setitem\") into a single column (not setting the full column). 
This is a method on the BlockManager level, to avoid creating an intermediate Series at the DataFrame level ()", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:column_setitem arg:self arg:loc arg:idx arg:value arg:inplace_only arguments arg arg arg arg arg If Call Assign Assign Assign If Compare Assign Call Assign Call Assign Call If Call Assign Call Call" + }, + { + "library": "pandas", + "name": "dtype", + "source_code": "@property\ndef dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype:\n return self._dtype", + "docstring": "The dtype for the DatetimeArray. .. warning:: A future version of pandas will change dtype to never be a `DatetimeArray.dtype` is returned.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_pid_namespace_link", + "source_code": "def _pid_namespace_link(pid: Optional[int]=None) -> str:\n PID_NAMESPACE_PATH = '/proc/{}/ns/pid'\n pid = pid or os.getpid()\n return os.readlink(PID_NAMESPACE_PATH.format(pid))", + "docstring": "Returns the link to the process's namespace, example: pid:[4026531836]", + "type": "function", + "file_path": "pytorch\\torch\\_strobelight\\cli_function_profiler.py", + "ast_data": "FunctionDef name:_pid_namespace_link arg:pid arguments arg Assign Assign BoolOp Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "map_and_batch_with_legacy_function", + "source_code": "@deprecation.deprecated(None, 'Use `tf.data.experimental.map_and_batch()')\n@tf_export(v1=['data.experimental.map_and_batch_with_legacy_function'])\ndef map_and_batch_with_legacy_function(map_func, batch_size, num_parallel_batches=None, drop_remainder=False, num_parallel_calls=None):\n if num_parallel_batches is None and num_parallel_calls is None:\n num_parallel_calls = batch_size\n elif num_parallel_batches is not None and num_parallel_calls is None:\n num_parallel_calls = batch_size * num_parallel_batches\n elif num_parallel_batches is not None and num_parallel_calls is not None:\n raise ValueError(f'`map_and_batch_with_legacy_function` allows only one of `num_parallel_batches` and `num_parallel_calls` to be set, but `num_parallel_batches` was set to {num_parallel_batches} and `num_parallel_calls` as set to {num_parallel_calls}.')\n\n def _apply_fn(dataset):\n return _MapAndBatchDataset(dataset, map_func, batch_size, num_parallel_calls, drop_remainder, use_legacy_function=True)\n return _apply_fn", + "docstring": "Fused implementation of and . NOTE: This is an escape hatch for existing uses of that do not work with V2 functions. New uses are strongly discouraged and existing uses should migrate to as this method will not be removed in V2. Args: map_func: A function mapping a nested structure of tensors to another nested structure of tensors. batch_size: A scalar , representing the number of consecutive elements of this dataset to combine in a single batch. num_parallel_batches: (Optional.) A scalar , representing the number of batches to create in parallel. On one hand, higher values can help mitigate the effect of stragglers. On the other hand, higher values can increase contention if CPU is scarce. drop_remainder: (Optional.) A scalar , representing whether the last batch should be dropped in case its size is smaller than desired; the default behavior is not to drop the smaller batch. num_parallel_calls: (Optional.) 
A scalar , representing the number of elements to process in parallel. If not specified, elements will be processed in parallel. If the value is used, then the number of parallel calls is set dynamically based on available CPU. Returns: A transformation function, which can be passed to . Raises: ValueError: If both and are specified.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\batching.py", + "ast_data": "FunctionDef name:map_and_batch_with_legacy_function arg:map_func arg:batch_size arg:num_parallel_batches arg:drop_remainder arg:num_parallel_calls arguments arg arg arg arg arg If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign If BoolOp Compare Compare Raise Call FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_serialize_signature_def_map", + "source_code": "def _serialize_signature_def_map(signature_def_map: _SignatureDefMap) -> dict[str, bytes]:\n signature_def_map_serialized = {}\n for key, signature_def in signature_def_map.items():\n signature_def_map_serialized[key] = signature_def.SerializeToString()\n return signature_def_map_serialized", + "docstring": "Serializes SignatureDef values in . Args: signature_def_map: Signature key -> SignatureDef mapping. Returns: Signature def map where the values () are serialized.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py", + "ast_data": "FunctionDef name:_serialize_signature_def_map arg:signature_def_map arguments arg Assign For Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "generate_detector_report", + "source_code": "@abstractmethod\ndef generate_detector_report(self, model) -> tuple[str, dict[str, Any]]:\n pass", + "docstring": "Args model (nn.Module or subclass): model to find observer insertion points Returns a Tuple of two elements: Str: string report of the suggested improvements Dict: contains useful data collected by the observer pertinent to this report", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:generate_detector_report arg:self arg:model arguments arg arg" + }, + { + "library": "django", + "name": "_i18n_cache_key_suffix", + "source_code": "def _i18n_cache_key_suffix(request, cache_key):\n if settings.USE_I18N:\n cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())\n if settings.USE_TZ:\n cache_key += '.%s' % get_current_timezone_name()\n return cache_key", + "docstring": "If necessary, add the current locale or time zone to the cache key.", + "type": "function", + "file_path": "django\\django\\utils\\cache.py", + "ast_data": "FunctionDef name:_i18n_cache_key_suffix arg:request arg:cache_key arguments arg arg If Call Call If Call Return return:yes" + }, + { + "library": "kornia", + "name": "from_matrix", + "source_code": "@classmethod\ndef from_matrix(cls, matrix: Tensor) -> Se2:\n r = So2.from_matrix(matrix[..., :2, :2])\n t = matrix[..., :2, -1]\n return cls(r, t)", + "docstring": "Create an Se2 group from a matrix. Args: matrix: tensor of shape :math:. 
Example: >>> s = Se2.from_matrix(torch.eye(3).repeat(2, 1, 1)) >>> s.r Parameter containing: tensor([1.+0.j, 1.+0.j], requires_grad=True) >>> s.t Parameter containing: tensor([[0., 0.], [0., 0.]], requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", + "ast_data": "FunctionDef name:from_matrix arg:cls arg:matrix arguments arg arg Assign Call Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_capstyle", + "source_code": "@_docstring.interpd\ndef set_capstyle(self, cs):\n self._capstyle = CapStyle(cs)", + "docstring": "Set the for the collection (for all its elements). Parameters ---------- cs : or %(CapStyle)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:set_capstyle arg:self arg:cs arguments arg arg Assign Call" + }, + { + "library": "matplotlib", + "name": "_get_font_family_and_reduced", + "source_code": "@classmethod\ndef _get_font_family_and_reduced(cls):\n ff = mpl.rcParams['font.family']\n ff_val = ff[0].lower() if len(ff) == 1 else None\n if len(ff) == 1 and ff_val in cls._font_families:\n return (ff_val, False)\n elif len(ff) == 1 and ff_val in cls._font_preambles:\n return (cls._font_types[ff_val], True)\n else:\n _log.info('font.family must be one of (%s) when text.usetex is True. serif will be used by default.', ', '.join(cls._font_families))\n return ('serif', False)", + "docstring": "Return the font family name and whether the font is reduced.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py", + "ast_data": "FunctionDef name:_get_font_family_and_reduced arg:cls arguments arg Assign Assign Compare Call Call If BoolOp Compare Call Compare Return return:yes If BoolOp Compare Call Compare Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "wait_stream", + "source_code": "def wait_stream(self, stream) -> None:\n self.wait_event(stream.record_event())", + "docstring": "Synchronize with another stream. All future work submitted to this stream will wait until all kernels submitted to a given stream at the time of call complete. 
Args: stream (Stream): a stream to synchronize.", + "type": "method", + "file_path": "pytorch\\torch\\xpu\\streams.py", + "ast_data": "FunctionDef name:wait_stream arg:self arg:stream arguments arg arg Call Call" + }, + { + "library": "pytorch", + "name": "_register_post_backward_hook", + "source_code": "def _register_post_backward_hook(state: _FSDPState, handle: Optional[FlatParamHandle]) -> None:\n if not torch.is_grad_enabled():\n return\n if not handle:\n return\n flat_param = handle.flat_param\n if torch.distributed._functional_collectives.is_torchdynamo_compiling():\n already_registered = hasattr(flat_param, '_post_backward_hook_handle')\n if already_registered or not flat_param.requires_grad:\n return\n hook = functools.partial(_post_backward_hook, state, handle)\n hook_handle = flat_param.register_post_accumulate_grad_hook(hook)\n flat_param._post_backward_hook_handle = hook_handle\n else:\n already_registered = hasattr(flat_param, '_post_backward_hook_state')\n if already_registered or not flat_param.requires_grad:\n return\n temp_flat_param = flat_param.expand_as(flat_param)\n _p_assert(temp_flat_param.grad_fn is not None, 'The `grad_fn` is needed to access the `AccumulateGrad` and register the post-backward hook')\n acc_grad = temp_flat_param.grad_fn.next_functions[0][0]\n assert acc_grad is not None\n hook_handle = acc_grad.register_hook(functools.partial(_post_backward_hook, state, handle))\n flat_param._post_backward_hook_state = (acc_grad, hook_handle)", + "docstring": "Registers post-backward hooks on the `` objects are the same.) If we instead prefer the *last* forward, then the hook runs early.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_register_post_backward_hook arg:state arg:handle arguments arg arg If Call Return return:no If Return return:no Assign If Call Assign Call If BoolOp Return return:no Assign Call Assign Call Assign Assign Call If BoolOp Return return:no Assign Call Call Compare Assign Compare Assign Call Call Assign" + }, + { + "library": "pandas", + "name": "loads", + "source_code": "def loads(bytes_object: bytes, *, fix_imports: bool=True, encoding: str='ASCII', errors: str='strict') -> Any:\n fd = io.BytesIO(bytes_object)\n return Unpickler(fd, fix_imports=fix_imports, encoding=encoding, errors=errors).load()", + "docstring": "Analogous to pickle._loads.", + "type": "function", + "file_path": "pandas\\pandas\\compat\\pickle_compat.py", + "ast_data": "FunctionDef name:loads arg:bytes_object arguments arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "local", + "source_code": "@property\ndef local(self):\n return self.srs.local", + "docstring": "Is this Spatial Reference local?", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py", + "ast_data": "FunctionDef name:local arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "theta", + "source_code": "@theta.setter\ndef theta(self, theta):\n self.kernel.theta = theta", + "docstring": "Sets the (flattened, log-transformed) non-fixed hyperparameters. 
Parameters ---------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:theta arg:self arg:theta arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_getattribute", + "source_code": "def _getattribute(self, name):\n func__fastdict_insert = object.__getattribute__(self, '_fastdict_insert')\n if name == 'lite':\n if self._tfmw_has_lite:\n attr = self._tfmw_import_module(name)\n setattr(self._tfmw_wrapped_module, 'lite', attr)\n func__fastdict_insert(name, attr)\n return attr\n attr = object.__getattribute__(self, name)\n if name.startswith('__') or name.startswith('_tfmw_') or name.startswith('_fastdict_'):\n func__fastdict_insert(name, attr)\n return attr\n if not (self._tfmw_print_deprecation_warnings and self._tfmw_add_deprecation_warning(name, attr)):\n func__fastdict_insert(name, attr)\n return attr", + "docstring": "Imports and caches pre-defined API. Warns if necessary. This method is a replacement for __getattribute__(). It will be added into the extended python module as a callback to reduce API overhead.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\module_wrapper.py", + "ast_data": "FunctionDef name:_getattribute arg:self arg:name arguments arg arg Assign Call If Compare If Assign Call Call Call Return return:yes Assign Call If BoolOp Call Call Call Call Return return:yes If BoolOp Call Call Return return:yes" + }, + { + "library": "django", + "name": "table_name_col", + "source_code": "@classmethod\ndef table_name_col(cls):\n return 'f_table_name'", + "docstring": "Return the name of the metadata column used to store the feature table name.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\models.py", + "ast_data": "FunctionDef name:table_name_col arg:cls arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "GaussianBlur2d", + "source_code": "class GaussianBlur2d(Module):\n\n def __init__(self, kernel_size: tuple[int, int] | int, sigma: tuple[float, float] | Tensor, border_type: str='reflect', separable: bool=True) -> None:\n super().__init__()\n self.kernel_size = kernel_size\n self.sigma = sigma\n self.border_type = border_type\n self.separable = separable\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(kernel_size={self.kernel_size}, sigma={self.sigma}, border_type={self.border_type}, separable={self.separable})'\n\n def forward(self, input: Tensor) -> Tensor:\n return gaussian_blur2d(input, self.kernel_size, self.sigma, self.border_type, self.separable)", + "docstring": "Create an operator that blurs a tensor using a Gaussian filter. The operator smooths the given tensor with a gaussian kernel by convolving it to each channel. It supports batched operation. Arguments: kernel_size: the size of the kernel. sigma: the standard deviation of the kernel. border_type: the padding mode to be applied before convolving. 
The expected modes are: `(B, C, H, W)(B, C, H, W)` Examples:: >>> input = torch.rand(2, 4, 5, 5) >>> gauss = GaussianBlur2d((3, 3), (1.5, 1.5)) >>> output = gauss(input) # 2x4x5x5 >>> output.shape torch.Size([2, 4, 5, 5])", + "type": "class", + "file_path": "kornia\\kornia\\filters\\gaussian.py", + "ast_data": "ClassDef name:GaussianBlur2d FunctionDef name:__init__ arg:self arg:kernel_size arg:sigma arg:border_type arg:separable arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "stop", + "source_code": "def stop(self):\n self.state = states.STOPPING\n self.log('Bus STOPPING')\n self.publish('stop')\n self.state = states.STOPPED\n self.log('Bus STOPPED')", + "docstring": "Stop all services.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\wspbus.py", + "ast_data": "FunctionDef name:stop arg:self arguments arg Assign Call Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_add_collection_def", + "source_code": "@staticmethod\ndef _add_collection_def(meta_graph_def, key, export_scope=None):\n meta_graph.add_collection_def(meta_graph_def, key, export_scope=export_scope)", + "docstring": "Adds a collection to MetaGraphDef protocol buffer. Args: meta_graph_def: MetaGraphDef protocol buffer. key: One of the GraphKeys or user-defined string. export_scope: Optional . Name scope to remove.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:_add_collection_def arg:meta_graph_def arg:key arg:export_scope arguments arg arg arg Call" + }, + { + "library": "scipy", + "name": "hb_read", + "source_code": "def hb_read(path_or_open_file, *, spmatrix=True):\n\n def _get_matrix(fid):\n hb = HBFile(fid)\n return hb.read_matrix()\n if hasattr(path_or_open_file, 'read'):\n data = _get_matrix(path_or_open_file)\n else:\n with open(path_or_open_file) as f:\n data = _get_matrix(f)\n if spmatrix:\n return csc_matrix(data)\n return data", + "docstring": "Read HB-format file. Parameters ---------- path_or_open_file : path-like or file-like If a file-like object, it is used as-is. Otherwise, it is opened before reading. spmatrix : bool, optional (default: True) If ``. Returns ------- data : csc_array or csc_matrix The data read from the HB file as a sparse array. Notes ----- At the moment not the full Harwell-Boeing format is supported. 
Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format Examples -------- We can read and write a harwell-boeing format file: >>> from scipy.io import hb_read, hb_write >>> from scipy.sparse import csr_array, eye >>> data = csr_array(eye(3)) # create a sparse array >>> hb_write(\"data.hb\", data) # write a hb file >>> print(hb_read(\"data.hb\", spmatrix=False)) # read a hb file Coords Values (0, 0) 1.0 (1, 1) 1.0 (2, 2) 1.0", + "type": "function", + "file_path": "scipy\\scipy\\io\\_harwell_boeing\\hb.py", + "ast_data": "FunctionDef name:hb_read arg:path_or_open_file arguments arg arg FunctionDef name:_get_matrix arg:fid arguments arg Assign Call Return return:yes Call If Call Assign Call With Call Assign Call If Return return:yes Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "check_is_fitted", + "source_code": "def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):\n if isclass(estimator):\n raise TypeError('{} is a class, not an instance.'.format(estimator))\n if msg is None:\n msg = \"This %(name)s instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.\"\n if not hasattr(estimator, 'fit'):\n raise TypeError('%s is not an estimator instance.' % estimator)\n tags = get_tags(estimator)\n if not tags.requires_fit and attributes is None:\n return\n if not _is_fitted(estimator, attributes, all_or_any):\n raise NotFittedError(msg % {'name': type(estimator).__name__})", + "docstring": "Perform is_fitted validation for estimator. Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing underscore) and otherwise raises a :class: with the given message. If an estimator does not set any attributes with a trailing underscore, it can define a `sphx_glr_auto_examples_developing_estimators_sklearn_is_fitted.pyattributesrequires_fitestimator_tagsrequires_fitattributesNoneestimator` is considered fitted if there exist an attribute that ends with a underscore and does not start with double underscore. msg : str, default=None The default error message is, \"This %(name)s instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.\" For custom messages if \"%(name)s\" is present in the message string, it is substituted for the estimator name. Eg. : \"Estimator, %(name)s, must be fitted before sparsifying\". all_or_any : callable, {all, any}, default=all Specify whether all or any of the given attributes must exist. Raises ------ TypeError If the estimator is a class or not an estimator instance NotFittedError If the attributes are not found. Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.utils.validation import check_is_fitted >>> from sklearn.exceptions import NotFittedError >>> lr = LogisticRegression() >>> try: ... check_is_fitted(lr) ... except NotFittedError as exc: ... print(f\"Model is not fitted yet.\") Model is not fitted yet. 
>>> lr.fit([[1, 2], [1, 3]], [1, 0]) LogisticRegression() >>> check_is_fitted(lr)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\validation.py", + "ast_data": "FunctionDef name:check_is_fitted arg:estimator arg:attributes arguments arg arg arg arg If Call Raise Call Call If Compare Assign If Call Raise Call Assign Call If BoolOp Compare Return return:no If Call Raise Call Call" + }, + { + "library": "scrapy", + "name": "download_request", + "source_code": "def download_request(self, request: Request, spider: Spider) -> Deferred[Response]:\n factory = self.HTTPClientFactory(request)\n self._connect(factory)\n return factory.deferred", + "docstring": "Return a deferred for the HTTP download", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http10.py", + "ast_data": "FunctionDef name:download_request arg:self arg:request arg:spider arguments arg arg arg Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "handle_leaf", + "source_code": "def handle_leaf(node: DecisionTreeNode, indent: str, unsafe_leaves: list[int]) -> str:\n if node.id in unsafe_leaves:\n return f'{indent}return None'\n class_probas = node.class_probs\n return f'{indent}return {best_probas_and_indices(class_probas)}'", + "docstring": "This generates the code for a leaf node in the decision tree. If the leaf is unsafe, the learned heuristic will return \"unsure\" (i.e. None).", + "type": "method", + "file_path": "pytorch\\torchgen\\_autoheuristic\\ah_tree.py", + "ast_data": "FunctionDef name:handle_leaf arg:node arg:indent arg:unsafe_leaves arguments arg arg arg If Compare Return return:yes Assign Return return:yes Call" + }, + { + "library": "numpy", + "name": "fromarrays", + "source_code": "def fromarrays(arraylist, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, fill_value=None):\n datalist = [ma.getdata(x) for x in arraylist]\n masklist = [np.atleast_1d(ma.getmaskarray(x)) for x in arraylist]\n _array = np.rec.fromarrays(datalist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray)\n _array._mask.flat = list(zip(*masklist))\n if fill_value is not None:\n _array.fill_value = fill_value\n return _array", + "docstring": "Creates a mrecarray from a (flat) list of masked arrays. Parameters ---------- arraylist : sequence A list of (masked) arrays. Each element of the sequence is first converted to a masked array if needed. If a 2D array is passed as argument, it is processed line by line dtype : {None, dtype}, optional Data type descriptor. shape : {None, integer}, optional Number of records. If None, shape is defined from the shape of the first array in the list. formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. names : {None, sequence}, optional Sequence of the names of each field. fill_value : {None, sequence}, optional Sequence of data to be used as filling values. 
Notes ----- Lists of tuples should be preferred over lists of lists for faster processing.", + "type": "function", + "file_path": "numpy\\numpy\\ma\\mrecords.py", + "ast_data": "FunctionDef name:fromarrays arg:arraylist arg:dtype arg:shape arg:formats arg:names arg:titles arg:aligned arg:byteorder arg:fill_value arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Call If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_formatter", + "source_code": "def _formatter(x):\n if isinstance(x, np.ndarray):\n if x.size != 0:\n return np.array2string(x, separator=', ')\n else:\n return repr(x.tolist())\n else:\n return str(x)", + "docstring": "Separate Numpy array elements with comma.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:_formatter arg:x arguments arg If Call If Compare Return return:yes Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "list_children", + "source_code": "def list_children(self, obj):\n if obj not in self._children_cache:\n children = self._children_cache[obj] = {}\n for name, child in super(_AugmentedGraphView, self).list_children(obj, save_type=base.SaveType.SAVEDMODEL, cache=self._serialization_cache):\n if isinstance(child, defun.ConcreteFunction):\n child = self._maybe_uncache_variable_captures(child)\n children[name] = child\n if isinstance(obj, def_function.Function) and (not children):\n self.untraced_functions.append(obj.name)\n for name, child in self._children_cache[obj].items():\n yield base.TrackableReference(name, child)", + "docstring": "Lists children of for SavedModel.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:list_children arg:self arg:obj arguments arg arg If Compare Assign For Call Call If Call Assign Call Assign If BoolOp Call Call For Call Call" + }, + { + "library": "scipy", + "name": "gaussian", + "source_code": "def gaussian(M, std, sym=True, *, xp=None, device=None):\n xp = _namespace(xp)\n if _len_guards(M):\n return xp.ones(M, dtype=xp.float64, device=device)\n M, needs_trunc = _extend(M, sym)\n n = xp.arange(0, M, dtype=xp.float64, device=device) - (M - 1.0) / 2.0\n sig2 = 2 * std * std\n w = xp.exp(-n ** 2 / sig2)\n return _truncate(w, needs_trunc)", + "docstring": "Return a Gaussian window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. std : float The standard deviation, sigma. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. %(xp_device_snippet)s Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if is even and is True). Notes ----- The Gaussian window is defined as .. 
math:: w(n) = e^{ -\\frac{1}{2}\\left(\\frac{n}{\\sigma}\\right)^2 } Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.gaussian(51, std=7) >>> plt.plot(window) >>> plt.title(r\"Gaussian window ($\\sigma$=7)\") >>> plt.ylabel(\"Amplitude\") >>> plt.xlabel(\"Sample\") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title(r\"Frequency response of the Gaussian window ($\\sigma$=7)\") >>> plt.ylabel(\"Normalized magnitude [dB]\") >>> plt.xlabel(\"Normalized frequency [cycles per sample]\")", + "type": "function", + "file_path": "scipy\\scipy\\signal\\windows\\_windows.py", + "ast_data": "FunctionDef name:gaussian arg:M arg:std arg:sym arguments arg arg arg arg arg Assign Call If Call Return return:yes Call Assign Call Assign Call Assign Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "nnlf", + "source_code": "def nnlf(self, theta, x):\n loc, scale, args = self._unpack_loc_scale(theta)\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = (asarray(x) - loc) / scale\n n_log_scale = len(x) * log(scale)\n if np.any(~self._support_mask(x, *args)):\n return inf\n return self._nnlf(x, *args) + n_log_scale", + "docstring": "Negative loglikelihood function. Notes ----- This is `theta` are the parameters (including loc and scale).", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:nnlf arg:self arg:theta arg:x arguments arg arg arg Assign Call If BoolOp Call Compare Return return:yes Assign Call Assign Call Call If Call Call Return return:yes Return return:yes Call" + }, + { + "library": "kornia", + "name": "one_hot", + "source_code": "def one_hot(labels: Tensor, num_classes: int, device: torch.device, dtype: torch.dtype, eps: float=1e-06) -> Tensor:\n if not isinstance(labels, Tensor):\n raise TypeError(f'Input labels type is not a Tensor. Got {type(labels)}')\n if not labels.dtype == torch.int64:\n raise ValueError(f'labels must be of the same dtype torch.int64. Got: {labels.dtype}')\n if num_classes < 1:\n raise ValueError(f'The number of classes must be bigger than one. Got: {num_classes}')\n shape = labels.shape\n one_hot = zeros((shape[0], num_classes) + shape[1:], device=device, dtype=dtype)\n return one_hot.scatter_(1, labels.unsqueeze(1), 1.0) + eps", + "docstring": "Convert an integer label x-D tensor to a one-hot (x+1)-D tensor. Args: labels: tensor with labels of shape :math:, where N is batch size. Each value is an integer representing correct classification. num_classes: number of classes in labels. device: the desired device of returned tensor. dtype: the desired data type of returned tensor. eps: epsilon for numerical stability. 
Returns: the labels in one hot tensor of shape :math:, Examples: >>> labels = torch.LongTensor([[[0, 1], [2, 0]]]) >>> one_hot(labels, num_classes=3, device=torch.device('cpu'), dtype=torch.int64) tensor([[[[1.0000e+00, 1.0000e-06], [1.0000e-06, 1.0000e+00]], [[1.0000e-06, 1.0000e+00], [1.0000e-06, 1.0000e-06]], [[1.0000e-06, 1.0000e-06], [1.0000e+00, 1.0000e-06]]]])", + "type": "function", + "file_path": "kornia\\kornia\\utils\\one_hot.py", + "ast_data": "FunctionDef name:one_hot arg:labels arg:num_classes arg:device arg:dtype arg:eps arguments arg arg arg arg arg If Call Raise Call Call If Compare Raise Call If Compare Raise Call Assign Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_matstruct_to_dict", + "source_code": "def _matstruct_to_dict(matobj):\n d = {}\n for f in matobj._fieldnames:\n elem = matobj.__dict__[f]\n if isinstance(elem, mat_struct):\n d[f] = _matstruct_to_dict(elem)\n elif _has_struct(elem):\n d[f] = _inspect_cell_array(elem)\n else:\n d[f] = elem\n return d", + "docstring": "Construct nested dicts from mat_struct objects.", + "type": "function", + "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", + "ast_data": "FunctionDef name:_matstruct_to_dict arg:matobj arguments arg Assign For Assign If Call Assign Call If Call Assign Call Assign Return return:yes" + }, + { + "library": "django", + "name": "is_multipart", + "source_code": "def is_multipart(self):\n if self.forms:\n return self.forms[0].is_multipart()\n else:\n return self.empty_form.is_multipart()", + "docstring": "Return True if the formset needs to be multipart, i.e. it has FileInput, or False otherwise.", + "type": "method", + "file_path": "django\\django\\forms\\formsets.py", + "ast_data": "FunctionDef name:is_multipart arg:self arguments arg If Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "staticfiles_urlpatterns", + "source_code": "def staticfiles_urlpatterns(prefix=None):\n if prefix is None:\n prefix = settings.STATIC_URL\n return static(prefix, view=serve)", + "docstring": "Helper function to return a URL pattern for serving static files.", + "type": "function", + "file_path": "django\\django\\contrib\\staticfiles\\urls.py", + "ast_data": "FunctionDef name:staticfiles_urlpatterns arg:prefix arguments arg If Compare Assign Return return:yes Call" + }, + { + "library": "django", + "name": "_check_form", + "source_code": "def _check_form(self, obj):\n if not _issubclass(obj.form, BaseModelForm):\n return must_inherit_from(parent='BaseModelForm', option='form', obj=obj, id='admin.E016')\n else:\n return []", + "docstring": "Check that form subclasses BaseModelForm.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_form arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "build", + "source_code": "def build(self, y_pred, y_true):\n super(MetricsContainer, self).build(y_pred)\n self._metrics = self._maybe_broadcast_to_outputs(y_pred, self._metrics)\n self._metrics = self._conform_to_outputs(y_pred, self._metrics)\n self._weighted_metrics = self._maybe_broadcast_to_outputs(y_pred, self._weighted_metrics)\n self._weighted_metrics = self._conform_to_outputs(y_pred, self._weighted_metrics)\n y_pred = nest.list_to_tuple(y_pred)\n y_true = nest.list_to_tuple(y_true)\n self._metrics = nest.list_to_tuple(self._metrics)\n self._weighted_metrics = nest.list_to_tuple(self._weighted_metrics)\n 
self._metrics = nest.map_structure_up_to(y_pred, self._get_metric_objects, self._metrics, y_true, y_pred)\n self._weighted_metrics = nest.map_structure_up_to(y_pred, self._get_metric_objects, self._weighted_metrics, y_true, y_pred)\n self._metrics = nest.flatten_up_to(y_pred, self._metrics, check_types=False)\n self._weighted_metrics = nest.flatten_up_to(y_pred, self._weighted_metrics, check_types=False)\n if not self._from_serialized:\n self._set_metric_names()\n self._create_ordered_metrics()\n self._built = True", + "docstring": "One-time setup of metric objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "FunctionDef name:build arg:self arg:y_pred arg:y_true arguments arg arg arg Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Call Call Assign" + }, + { + "library": "pandas", + "name": "grouped_reduce", + "source_code": "def grouped_reduce(self, func: Callable) -> Self:\n result_blocks: list[Block] = []\n for blk in self.blocks:\n if blk.is_object:\n for sb in blk._split():\n applied = sb.apply(func)\n result_blocks = extend_blocks(applied, result_blocks)\n else:\n applied = blk.apply(func)\n result_blocks = extend_blocks(applied, result_blocks)\n if len(result_blocks) == 0:\n nrows = 0\n else:\n nrows = result_blocks[0].values.shape[-1]\n index = default_index(nrows)\n return type(self).from_blocks(result_blocks, [self.axes[0], index])", + "docstring": "Apply grouped reduction function blockwise, returning a new BlockManager. Parameters ---------- func : grouped reduction function Returns ------- BlockManager", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:grouped_reduce arg:self arg:func arguments arg arg For If For Call Assign Call Assign Call Assign Call Assign Call If Compare Call Assign Assign Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_check_expression", + "source_code": "def _check_expression(expr) -> None:\n if not expr:\n raise ValueError('expr cannot be an empty string')", + "docstring": "Make sure an expression is not an empty string Parameters ---------- expr : object An object that can be converted to a string Raises ------ ValueError * If expr is an empty string", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\eval.py", + "ast_data": "FunctionDef name:_check_expression arg:expr arguments arg If Raise Call" + }, + { + "library": "tensorflow", + "name": "from_state", + "source_code": "@classmethod\ndef from_state(cls, state, alg):\n return cls(alg=alg, state=state)", + "docstring": "Creates a generator from a state. See for description of and . Args: state: the new state. alg: the RNG algorithm. 
Returns: The new generator.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:from_state arg:cls arg:state arg:alg arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_register_root_pre_forward_hook", + "source_code": "@no_type_check\ndef _register_root_pre_forward_hook(state: _FSDPState, module: nn.Module):\n for forward_handle in state._root_pre_forward_handles:\n forward_handle.remove()\n state._root_pre_forward_handles.clear()\n hook = functools.partial(_root_pre_forward, state)\n state._root_pre_forward_handles.append(module.register_forward_pre_hook(hook, prepend=True, with_kwargs=True))", + "docstring": "Registers root pre-forward hook on `` to a module to indicate that that module is the local FSDP root. We may remove this assumption in the future, in which case we will need to register this root pre-forward hook on any candidate module that may be the local FSDP root.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_register_root_pre_forward_hook arg:state arg:module arguments arg arg For Call Call Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "create_autocast_variable", + "source_code": "def create_autocast_variable(variable):\n if not distributed_training_utils.is_distributed_variable(variable):\n return AutoCastVariable(variable)\n\n class AutoCastDistributedVariable(AutoCastVariable, variable.__class__):\n\n def __repr__(self):\n return ''.format(v=self)\n return AutoCastDistributedVariable(variable)", + "docstring": "Creates an AutoCastVariable that wraps another variable. This typically just returns . But, if the variable is a DistributedVariable or one of its subclasses, we instead dynamically create a class that subclasses from both AutoCastVariable and variable.__class__. This is so the returned variable will still pass , which is required for DistributedVariables and its subclasses to work properly. Args: variable: A floating-point resource variable to wrap. 
Returns: An AutoCastVariable that wraps the variable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py", + "ast_data": "FunctionDef name:create_autocast_variable arg:variable arguments arg If Call Return return:yes Call ClassDef name:AutoCastDistributedVariable FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_select_and_clip_prob", + "source_code": "def _select_and_clip_prob(cdfprob, sfprob, cdf=True):\n p = np.where(cdf, cdfprob, sfprob)\n return _clip_prob(p)", + "docstring": "Selects either the CDF or SF, and then clips to range 0<=p<=1.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_ksstats.py", + "ast_data": "FunctionDef name:_select_and_clip_prob arg:cdfprob arg:sfprob arg:cdf arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "name", + "source_code": "@property\ndef name(self):\n name = capi.get_fd_name(self._ldefn)\n return force_str(name, self._ds.encoding, strings_only=True)", + "docstring": "Return the name of this layer in the Data Source.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "range_pop", + "source_code": "def range_pop():\n return _nvtx.rangePop()", + "docstring": "Pop a range off of a stack of nested range spans. Returns the zero-based depth of the range that is ended.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\nvtx.py", + "ast_data": "FunctionDef name:range_pop arguments Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, id, len, file, extra=None, png=None):\n self.id = id\n self.len = len\n self.pdfFile = file\n self.file = file.fh\n self.compressobj = None\n if extra is None:\n self.extra = dict()\n else:\n self.extra = extra.copy()\n if png is not None:\n self.extra.update({'Filter': Name('FlateDecode'), 'DecodeParms': png})\n self.pdfFile.recordXref(self.id)\n if mpl.rcParams['pdf.compression'] and (not png):\n self.compressobj = zlib.compressobj(mpl.rcParams['pdf.compression'])\n if self.len is None:\n self.file = BytesIO()\n else:\n self._writeHeader()\n self.pos = self.file.tell()", + "docstring": "Parameters ---------- id : int Object id of the stream. len : Reference or None An unused Reference object for the length of the stream; None means to use a memory buffer so the length can be inlined. file : PdfFile The underlying object to write the stream to. extra : dict from Name to anything, or None Extra key-value pairs to include in the stream header. 
png : dict or None If the data is already png encoded, the decode parameters.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:id arg:len arg:file arg:extra arg:png arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign If Compare Assign Call Assign Call If Compare Call Call Call If BoolOp Assign Call If Compare Assign Call Call Assign Call" + }, + { + "library": "pandas", + "name": "_has_externally_shared_axis", + "source_code": "def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool:\n if compare_axis == 'x':\n axes = ax1.get_shared_x_axes()\n elif compare_axis == 'y':\n axes = ax1.get_shared_y_axes()\n else:\n raise ValueError(\"_has_externally_shared_axis() needs 'x' or 'y' as a second parameter\")\n axes_siblings = axes.get_siblings(ax1)\n ax1_points = ax1.get_position().get_points()\n for ax2 in axes_siblings:\n if not np.array_equal(ax1_points, ax2.get_position().get_points()):\n return True\n return False", + "docstring": "Return whether an axis is externally shared. Parameters ---------- ax1 : matplotlib.axes.Axes Axis to query. compare_axis : str or according to whether the X-axis or Y-axis is being compared. Returns ------- bool if the axis is externally shared. Otherwise . Notes ----- If two axes with different positions are sharing an axis, they can be referred to as *externally* sharing the common axis. If two axes sharing an axis also have the same position, they can be referred to as *internally* sharing the common axis (a.k.a twinning). _handle_shared_axes() is only interested in axes externally sharing an axis, regardless of whether either of the axes is also internally sharing with a third axis.", + "type": "function", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\tools.py", + "ast_data": "FunctionDef name:_has_externally_shared_axis arg:ax1 arg:compare_axis arguments arg arg If Compare Assign Call If Compare Assign Call Raise Call Assign Call Assign Call Call For If Call Call Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "contains_branch", + "source_code": "def contains_branch(self, other):\n if self.depth < other.depth:\n return False\n for _, sub_tree in self._iter_break_from_left_to_right():\n if sub_tree == other:\n return True\n return False", + "docstring": "Return whether the given transform is a sub-tree of this transform. This routine uses transform equality to identify sub-trees, therefore in many situations it is object id which will be used. 
For the case where the given transform represents the whole of this transform, returns True.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:contains_branch arg:self arg:other arguments arg arg If Compare Return return:yes For Call If Compare Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "clear", + "source_code": "def clear(self):\n self._clear_without_update()\n self.update()", + "docstring": "Clear the selection and set the selector ready to make a new one.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg Call Call" + }, + { + "library": "tensorflow", + "name": "on_train_begin", + "source_code": "def on_train_begin(self, logs=None):\n logs = self._process_logs(logs)\n for callback in self.callbacks:\n callback.on_train_begin(logs)", + "docstring": "Calls the methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:on_train_begin arg:self arg:logs arguments arg arg Assign Call For Call" + }, + { + "library": "cryptography", + "name": "add_extension", + "source_code": "def add_extension(self, extval: ExtensionType, critical: bool) -> CertificateSigningRequestBuilder:\n if not isinstance(extval, ExtensionType):\n raise TypeError('extension must be an ExtensionType')\n extension = Extension(extval.oid, critical, extval)\n _reject_duplicate_extension(extension, self._extensions)\n return CertificateSigningRequestBuilder(self._subject_name, [*self._extensions, extension], self._attributes)", + "docstring": "Adds an X.509 extension to the certificate request.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\x509\\base.py", + "ast_data": "FunctionDef name:add_extension arg:self arg:extval arg:critical arguments arg arg arg If Call Raise Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_RecordLastCheckpoint", + "source_code": "def _RecordLastCheckpoint(self, latest_save_path):\n if not self.saver_def.max_to_keep:\n return\n for p in self._last_checkpoints[:]:\n if latest_save_path == self._CheckpointFilename(p):\n self._last_checkpoints.remove(p)\n self._last_checkpoints.append((latest_save_path, time.time()))\n if len(self._last_checkpoints) > self.saver_def.max_to_keep:\n self._checkpoints_to_be_deleted.append(self._last_checkpoints.pop(0))", + "docstring": "Manages the list of the latest checkpoints.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:_RecordLastCheckpoint arg:self arg:latest_save_path arguments arg arg If Return return:no For If Compare Call Call Call Call If Compare Call Call Call" + }, + { + "library": "tensorflow", + "name": "sparse_segment_mean", + "source_code": "@tf_export(v1=['sparse.segment_mean', 'sparse_segment_mean'])\n@deprecation.deprecated_endpoints('sparse_segment_mean')\ndef sparse_segment_mean(data, indices, segment_ids, name=None, num_segments=None, sparse_gradient=False):\n if num_segments is not None:\n return gen_math_ops.sparse_segment_mean_with_num_segments(data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name, sparse_gradient=sparse_gradient)\n else:\n return 
gen_math_ops.sparse_segment_mean(data=data, indices=indices, segment_ids=segment_ids, name=name, sparse_gradient=sparse_gradient)", + "docstring": "Computes the mean along sparse segments of a tensor. Read [the section on segmentation]( for an explanation of segments. Like , but can have rank less than 's first dimension, selecting a subset of dimension 0, specified by . is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases is used to determine the size of the output. Args: data: A with data that will be assembled in the output. indices: A 1-D with indices into . Has same rank as . segment_ids: A 1-D with indices into the output . Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output . sparse_gradient: An optional . Defaults to . If , the gradient of this function will be sparse () instead of dense (). The sparse gradient will contain one non-zero row for each unique index in . Returns: A of the shape as data, except for dimension 0 which has size , the number of segments specified via or inferred for the last element in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:sparse_segment_mean arg:data arg:indices arg:segment_ids arg:name arg:num_segments arg:sparse_gradient arguments arg arg arg arg arg arg If Compare Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "enable", + "source_code": "def enable(self, event=None):\n self.figure.canvas.widgetlock(self)\n self._idPress = self.figure.canvas.mpl_connect('button_press_event', self._press)\n self._idRelease = self.figure.canvas.mpl_connect('button_release_event', self._release)\n self._idScroll = self.figure.canvas.mpl_connect('scroll_event', self.scroll_zoom)", + "docstring": "Connect press/release events and lock the canvas.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:enable arg:self arg:event arguments arg arg Call Assign Call Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "scatter_sub", + "source_code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n return self._lazy_read(gen_resource_variable_ops.resource_scatter_sub(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", + "docstring": "Subtracts from this variable. Args: sparse_delta: to be subtracted from this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. 
Raises: TypeError: if is not an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:scatter_sub arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "broadcast_to", + "source_code": "@dispatch.dispatch_for_api(array_ops.broadcast_to)\ndef broadcast_to(input: ragged_tensor.RaggedOrDense, shape: dynamic_ragged_shape.DynamicRaggedShape) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]:\n return dynamic_ragged_shape.broadcast_to(input, shape)", + "docstring": "Broadcasts a potentially ragged tensor to a ragged shape. Tiles as necessary to match the given shape. Behavior is undefined if is not broadcast-compatible with . Args: input: The potentially ragged tensor to broadcast. shape: A Returns: A potentially ragged tensor whose values are taken from , and whose shape matches .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", + "ast_data": "FunctionDef name:broadcast_to arg:input arg:shape arguments arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "get_db_prep_save", + "source_code": "def get_db_prep_save(self, value, connection):\n if hasattr(value, 'as_sql'):\n return value\n return self.get_db_prep_value(value, connection=connection, prepared=False)", + "docstring": "Return field's value prepared for saving into a database.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:get_db_prep_save arg:self arg:value arg:connection arguments arg arg arg If Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "has_resource", + "source_code": "def has_resource(self, feature_column, resource_name):\n return resource_name in self._cols_to_resources_map[feature_column]", + "docstring": "Returns true iff a resource with same name exists. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A object this variable corresponds to. 
resource_name: Name of the resource.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:has_resource arg:self arg:feature_column arg:resource_name arguments arg arg arg Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "_premultiplied_argb32_to_unmultiplied_rgba8888", + "source_code": "def _premultiplied_argb32_to_unmultiplied_rgba8888(buf):\n rgba = np.take(buf, [2, 1, 0, 3] if sys.byteorder == 'little' else [1, 2, 3, 0], axis=2)\n rgb = rgba[..., :-1]\n alpha = rgba[..., -1]\n mask = alpha != 0\n for channel in np.rollaxis(rgb, -1):\n channel[mask] = (channel[mask].astype(int) * 255 + alpha[mask] // 2) // alpha[mask]\n return rgba", + "docstring": "Convert a premultiplied ARGB32 buffer to an unmultiplied RGBA8888 buffer.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_premultiplied_argb32_to_unmultiplied_rgba8888 arg:buf arguments arg Assign Call Compare Assign Assign Assign Compare For Call Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "_process_docstring", + "source_code": "def _process_docstring(app: Sphinx, what: str, name: str, obj: Any, options: Any, lines: list[str]) -> None:\n result_lines = lines\n docstring: GoogleDocstring\n if app.config.napoleon_numpy_docstring:\n docstring = NumpyDocstring(result_lines, app.config, app, what, name, obj, options)\n result_lines = docstring.lines()\n if app.config.napoleon_google_docstring:\n docstring = GoogleDocstring(result_lines, app.config, app, what, name, obj, options)\n result_lines = docstring.lines()\n lines[:] = result_lines.copy()", + "docstring": "Process the docstring for a given python object. Called when autodoc has read and processed a docstring. is a list of docstring lines that modifies in place to change what Sphinx outputs. The following settings in conf.py control what styles of docstrings will be parsed: * `lines` is modified *in place*", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\napoleon\\__init__.py", + "ast_data": "FunctionDef name:_process_docstring arg:app arg:what arg:name arg:obj arg:options arg:lines arguments arg arg arg arg arg arg Assign If Assign Call Assign Call If Assign Call Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "register_custom_device", + "source_code": "def register_custom_device(self, device_capsule, device_name, device_info_capsule):\n self.ensure_initialized()\n pywrap_tfe.TFE_Py_RegisterCustomDevice(self._handle, device_capsule, device_name, device_info_capsule)", + "docstring": "Calls TFE_RegisterCustomDevice. See the non-member function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:register_custom_device arg:self arg:device_capsule arg:device_name arg:device_info_capsule arguments arg arg arg arg Call Call" + }, + { + "library": "scikit-learn", + "name": "score_samples", + "source_code": "def score_samples(self, X):\n check_is_fitted(self)\n X = validate_data(self, X, reset=False)\n Xr = X - self.mean_\n precision = self.get_precision()\n n_features = X.shape[1]\n log_like = -0.5 * (Xr * np.dot(Xr, precision)).sum(axis=1)\n log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))\n return log_like", + "docstring": "Compute the log-likelihood of each sample. Parameters ---------- X : ndarray of shape (n_samples, n_features) The data. 
Returns ------- ll : ndarray of shape (n_samples,) Log-likelihood of each sample under the current model.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py", + "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Call Assign Call Assign Assign Call Assign Assign Call Call Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "ValidationError", + "source_code": "class ValidationError(Exception):\n pass", + "docstring": "Raised for validation errors.", + "type": "class", + "file_path": "sphinx\\sphinx\\cmd\\quickstart.py", + "ast_data": "ClassDef name:ValidationError" + }, + { + "library": "scikit-learn", + "name": "_extend_region", + "source_code": "def _extend_region(steep_point, xward_point, start, min_samples):\n n_samples = len(steep_point)\n non_xward_points = 0\n index = start\n end = start\n while index < n_samples:\n if steep_point[index]:\n non_xward_points = 0\n end = index\n elif not xward_point[index]:\n non_xward_points += 1\n if non_xward_points > min_samples:\n break\n else:\n return end\n index += 1\n return end", + "docstring": "Extend the area until it's maximal. It's the same function for both upward and downward reagions, depending on the given input parameters. Assuming: - steep_{upward/downward}: bool array indicating whether a point is a steep {upward/downward}; - upward/downward: bool array indicating whether a point is upward/downward; To extend an upward reagion, `` index.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\cluster\\_optics.py", + "ast_data": "FunctionDef name:_extend_region arg:steep_point arg:xward_point arg:start arg:min_samples arguments arg arg arg arg Assign Call Assign Assign Assign While Compare If Assign Assign If If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "assert_like_rnncell", + "source_code": "def assert_like_rnncell(cell_name, cell):\n conditions = [_hasattr(cell, 'output_size'), _hasattr(cell, 'state_size'), _hasattr(cell, 'get_initial_state') or _hasattr(cell, 'zero_state'), callable(cell)]\n errors = [\"'output_size' property is missing\", \"'state_size' property is missing\", \"either 'zero_state' or 'get_initial_state' method is required\", 'is not callable']\n if not all(conditions):\n errors = [error for error, cond in zip(errors, conditions) if not cond]\n raise TypeError('The argument {!r} ({}) is not an RNNCell: {}.'.format(cell_name, cell, ', '.join(errors)))", + "docstring": "Raises a TypeError if cell is not like an RNNCell. NOTE: Do not rely on the error message (in particular in tests) which can be subject to change to increase readability. Use ASSERT_LIKE_RNNCELL_ERROR_REGEXP. Args: cell_name: A string to give a meaningful error referencing to the name of the functionargument. cell: The object which should behave like an RNNCell. Raises: TypeError: A human-friendly exception.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", + "ast_data": "FunctionDef name:assert_like_rnncell arg:cell_name arg:cell arguments arg arg Assign Call Call BoolOp Call Call Call Assign If Call Assign Call Raise Call Call Call" + }, + { + "library": "pytorch", + "name": "type", + "source_code": "@staticmethod\ndef type() -> str:\n raise RuntimeError('CacheArtifact is an abstract class, please use a subclass')", + "docstring": "Returns the type of the artifact. Must be unique across all CacheArtifact classes. 
CacheArtifactFactory.register will add property method to CacheInfo based on this (def {type}_artifacts) that returns all artifacts for specific cache.", + "type": "method", + "file_path": "pytorch\\torch\\compiler\\_cache.py", + "ast_data": "FunctionDef name:type arguments Raise Call" + }, + { + "library": "seaborn", + "name": "share_axis", + "source_code": "def share_axis(ax0, ax1, which):\n if _version_predates(mpl, '3.5'):\n group = getattr(ax0, f'get_shared_{which}_axes')()\n group.join(ax1, ax0)\n else:\n getattr(ax1, f'share{which}')(ax0)", + "docstring": "Handle changes to post-hoc axis sharing.", + "type": "function", + "file_path": "seaborn\\seaborn\\_compat.py", + "ast_data": "FunctionDef name:share_axis arg:ax0 arg:ax1 arg:which arguments arg arg arg If Call Assign Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "capture_call_time_value", + "source_code": "def capture_call_time_value(self, closure, spec, key=None, default_value=None, placeholder=None):\n if key is None:\n key = object()\n if key not in self._function_captures.by_ref_internal:\n trace_ctx = trace_type.InternalTracingContext(True)\n spec = trace_type.from_value(spec, trace_ctx)\n if placeholder is None:\n placeholder_ctx = trace_type.InternalPlaceholderContext(self)\n placeholder = spec.placeholder_value(placeholder_ctx)\n\n def wrapped_closure():\n if save_context.in_save_context() and default_value is not None:\n return default_value\n if not context.executing_eagerly():\n graph = ops.get_default_graph()\n assert isinstance(graph, FuncGraph), 'This API should only be used in TF2 environment.'\n with graph.as_default():\n ret_nest = graph.capture_call_time_value(closure, spec, key=key, default_value=default_value)\n else:\n ret_nest = closure()\n ret_nest = spec.cast(ret_nest, trace_type.InternalCastContext)\n return spec.to_tensors(ret_nest)\n wrapped_closure.output_spec = spec\n self._function_captures.add_or_replace(key=key, external=wrapped_closure, internal=placeholder, tracetype=spec, is_by_ref=True)\n return self._function_captures.by_ref_internal[key]", + "docstring": "Returns a placeholder which at call time has the value closure(). The supports the notion of captures, that is, it allows Python functions to have closure variables, which bind over some value outside the function. However, this name binding is \"early binding\" performed before the program is run, i.e., while in Python, name binding is performed as the program is running. allows tf.function to mimic late binding as a Python function does, by passing in a callable argument to be executed when the tf.function is invoked eagerly. E.g. Note that a function itself does not work well in the saving process (since the tf.function in which it's called is not invoked eagerly) unless passed a argument. At saving time, the argument is returned instead. Args: closure: function which takes no arguments, to be evaluated at function call time, returning a nest of tensors compatible with . spec: nest of TypeSpec for the value to capture. key: optional. If not None, multiple calls to lazy_capture with the same key in the same graph will return the same placeholder, and the first closure will be used at function call time. default_value: optional value to return in environments that cannot safely evaluate closure. placeholder: optional. If not None, the graph will take the passed-in as the internal capture instead of creating a new one. This is useful when loading from a SavedModel. 
Returns: Nest of placeholders which, at function call time, will be fed with the result of calling closure(). Raises: ValueError: at function call time, if the return value of closure() is not compatible with .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", + "ast_data": "FunctionDef name:capture_call_time_value arg:self arg:closure arg:spec arg:key arg:default_value arg:placeholder arguments arg arg arg arg arg arg If Compare Assign Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call FunctionDef name:wrapped_closure arguments If BoolOp Call Compare Return return:yes If Call Assign Call Call With Call Assign Call Assign Call Assign Call Return return:yes Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "AggregateQuery", + "source_code": "class AggregateQuery(Query):\n compiler = 'SQLAggregateCompiler'\n\n def __init__(self, model, inner_query):\n self.inner_query = inner_query\n super().__init__(model)", + "docstring": "Take another query as a parameter to the FROM clause and only select the elements in the provided list.", + "type": "class", + "file_path": "django\\django\\db\\models\\sql\\subqueries.py", + "ast_data": "ClassDef name:AggregateQuery Assign FunctionDef name:__init__ arg:self arg:model arg:inner_query arguments arg arg arg Assign Call Call" + }, + { + "library": "pandas", + "name": "resolution", + "source_code": "@property\ndef resolution(self) -> str:\n return self._resolution_obj.attrname", + "docstring": "Returns day, hour, minute, second, millisecond or microsecond", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:resolution arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "add_image", + "source_code": "def add_image(self, image):\n _api.check_isinstance(mimage.AxesImage, image=image)\n self._set_artist_props(image)\n if not image.get_label():\n image.set_label(f'_child{len(self._children)}')\n self._children.append(image)\n image._remove_method = self._children.remove\n self.stale = True\n return image", + "docstring": "Add an to the Axes; return the image.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:add_image arg:self arg:image arguments arg arg Call Call If Call Call Call Call Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "AlterModelOptions", + "source_code": "class AlterModelOptions(ModelOptionOperation):\n ALTER_OPTION_KEYS = ['base_manager_name', 'default_manager_name', 'default_related_name', 'get_latest_by', 'managed', 'ordering', 'permissions', 'default_permissions', 'select_on_save', 'verbose_name', 'verbose_name_plural']\n\n def __init__(self, name, options):\n self.options = options\n super().__init__(name)\n\n def deconstruct(self):\n kwargs = {'name': self.name, 'options': self.options}\n return (self.__class__.__qualname__, [], kwargs)\n\n def state_forwards(self, app_label, state):\n state.alter_model_options(app_label, self.name_lower, self.options, self.ALTER_OPTION_KEYS)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def describe(self):\n return 'Change Meta options on %s' % self.name\n\n @property\n def migration_name_fragment(self):\n return 'alter_%s_options' % self.name_lower", + "docstring": "Set new model 
options that don't directly affect the database schema (like verbose_name, permissions, ordering). Python code in migrations may still need them.", + "type": "class", + "file_path": "django\\django\\db\\migrations\\operations\\models.py", + "ast_data": "ClassDef name:AlterModelOptions Assign FunctionDef name:__init__ arg:self arg:name arg:options arguments arg arg arg Assign Call Call FunctionDef name:deconstruct arg:self arguments arg Assign Return return:yes FunctionDef name:state_forwards arg:self arg:app_label arg:state arguments arg arg arg Call FunctionDef name:database_forwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg FunctionDef name:database_backwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg FunctionDef name:describe arg:self arguments arg Return return:yes FunctionDef name:migration_name_fragment arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_Assert3DImage", + "source_code": "def _Assert3DImage(image):\n return control_flow_ops.with_dependencies(_Check3DImage(image, require_static=False), image)", + "docstring": "Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: 3-D Tensor of shape [height, width, channels] Raises: ValueError: if is not a 3-vector. Returns: If the shape of could be verified statically, is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", + "ast_data": "FunctionDef name:_Assert3DImage arg:image arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "to_dict", + "source_code": "def to_dict(self, orient: Literal['dict', 'list', 'series', 'split', 'tight', 'records', 'index']='dict', *, into: type[MutableMappingT] | MutableMappingT=dict, index: bool=True) -> MutableMappingT | list[MutableMappingT]:\n from pandas.core.methods.to_dict import to_dict\n return to_dict(self, orient, into=into, index=index)", + "docstring": "Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. 
versionadded:: 1.4.0 'tight' as an allowed value for the `orientorientorientorientdefaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict(\"records\", into=dd) [defaultdict(, {'col1': 1, 'col2': 0.5}), defaultdict(, {'col1': 2, 'col2': 0.75})]", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:to_dict arg:self arg:orient arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_has_valid_tensors", + "source_code": "def _has_valid_tensors(self):\n return self._input_tensors is not None and self._output_tensors", + "docstring": "Checks if the input and output tensors have been initialized. Returns: Bool.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:_has_valid_tensors arg:self arguments arg Return return:yes BoolOp Compare" + }, + { + "library": "tensorflow", + "name": "scatter_min", + "source_code": "def scatter_min(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n return self._lazy_read(gen_resource_variable_ops.resource_scatter_min(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", + "docstring": "Updates this variable with the min of and itself. Args: sparse_delta: to use as an argument of min with this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:scatter_min arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_get_on_device_or_primary", + "source_code": "def _get_on_device_or_primary(self):\n if values_util.is_saving_non_distributed():\n return self._primary\n replica_id = values_util.get_current_replica_id_as_int()\n if replica_id is None:\n current_device = device_util.canonicalize(device_util.current())\n for i, value in enumerate(self._values):\n if device_util.canonicalize(value.device) == current_device:\n return self._get_replica(i)\n return self._get_replica(0)\n else:\n return self._get_replica(replica_id)", + "docstring": "Returns value in same replica or device if possible, else the _primary.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:_get_on_device_or_primary arg:self arguments arg If Call Return return:yes Assign Call If Compare Assign Call Call For Call If Compare Call Return return:yes Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_reduce", + "source_code": "def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs):\n meth = getattr(self, name, None)\n if meth is None:\n raise TypeError(f\"'{type(self).__name__}' with dtype {self.dtype} does not support operation '{name}'\")\n result = meth(skipna=skipna, **kwargs)\n if keepdims:\n if name in ['min', 'max']:\n result = self._from_sequence([result], dtype=self.dtype)\n else:\n result = np.array([result])\n return result", + "docstring": "Return a scalar result of performing the reduction 
operation. Parameters ---------- name : str Name of the function, supported values are: { any, all, min, max, sum, mean, median, prod, std, var, sem, kurt, skew }. skipna : bool, default True If True, skip NaN values. keepdims : bool, default False If False, a scalar is returned. If True, the result has dimension with size one along the reduced axis. **kwargs Additional keyword arguments passed to the reduction function. Currently, is the only supported kwarg. Returns ------- scalar or ndarray: The result of the reduction operation. The type of the result depends on : - If is , a scalar value is returned. - If is , the result is wrapped in a numpy array with a single element. Raises ------ TypeError : subclass does not define operations See Also -------- Series.min : Return the minimum value. Series.max : Return the maximum value. Series.sum : Return the sum of values. Series.mean : Return the mean of values. Series.median : Return the median of values. Series.std : Return the standard deviation. Series.var : Return the variance. Series.prod : Return the product of values. Series.sem : Return the standard error of the mean. Series.kurt : Return the kurtosis. Series.skew : Return the skewness. Examples -------- >>> pd.array([1, 2, 3])._reduce(\"min\") np.int64(1) >>> pd.array([1, 2, 3])._reduce(\"max\") np.int64(3) >>> pd.array([1, 2, 3])._reduce(\"sum\") np.int64(6) >>> pd.array([1, 2, 3])._reduce(\"mean\") np.float64(2.0) >>> pd.array([1, 2, 3])._reduce(\"median\") np.float64(2.0)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:_reduce arg:self arg:name arguments arg arg arg arg arg Assign Call If Compare Raise Call Call Assign Call If If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "confidence_interval", + "source_code": "def confidence_interval(self, confidence_level=0.95):\n low, high = _t_confidence_interval(self.df, self._statistic_np, confidence_level, self._alternative, self._dtype, self._xp)\n low = low * self._standard_error + self._estimate\n high = high * self._standard_error + self._estimate\n return ConfidenceInterval(low=low, high=high)", + "docstring": "Parameters ---------- confidence_level : float The confidence level for the calculation of the population mean confidence interval. Default is 0.95. Returns ------- ci : namedtuple The confidence interval is returned in a `lowhigh`.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:confidence_interval arg:self arg:confidence_level arguments arg arg Assign Call Assign Assign Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n X = validate_data(self, X)\n if self.assume_centered:\n self.location_ = np.zeros(X.shape[1])\n else:\n self.location_ = X.mean(0)\n covariance, shrinkage = _ledoit_wolf(X - self.location_, assume_centered=True, block_size=self.block_size)\n self.shrinkage_ = shrinkage\n self._set_covariance(covariance)\n return self", + "docstring": "Fit the Ledoit-Wolf shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. 
Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\covariance\\_shrunk_covariance.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X, return_std=False):\n y_mean = self._decision_function(X)\n if return_std is False:\n return y_mean\n else:\n col_index = self.lambda_ < self.threshold_lambda\n X = _safe_indexing(X, indices=col_index, axis=1)\n sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)\n y_std = np.sqrt(sigmas_squared_data + 1.0 / self.alpha_)\n return (y_mean, y_std)", + "docstring": "Predict using the linear model. In addition to the mean of the predictive distribution, also its standard deviation can be returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. return_std : bool, default=False Whether to return the standard deviation of posterior prediction. Returns ------- y_mean : array-like of shape (n_samples,) Mean of predictive distribution of query points. y_std : array-like of shape (n_samples,) Standard deviation of predictive distribution of query points.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_bayes.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arg:return_std arguments arg arg arg Assign Call If Compare Return return:yes Assign Compare Assign Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "_get_missing_target_ids", + "source_code": "def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):\n vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True).filter(**{source_field_name: self.related_val[0], '%s__in' % target_field_name: target_ids})\n return target_ids.difference(vals)", + "docstring": "Return the subset of ids of that aren't already assigned to this relationship.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py", + "ast_data": "FunctionDef name:_get_missing_target_ids arg:self arg:source_field_name arg:target_field_name arg:db arg:target_ids arguments arg arg arg arg arg Assign Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_natural_params", + "source_code": "@property\ndef _natural_params(self) -> tuple[Tensor, ...]:\n raise NotImplementedError", + "docstring": "Abstract method for natural parameters. 
Returns a tuple of Tensors based on the distribution", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\exp_family.py", + "ast_data": "FunctionDef name:_natural_params arg:self arguments arg Raise" + }, + { + "library": "scipy", + "name": "_fractional_power_pade", + "source_code": "def _fractional_power_pade(R, t, m):\n if m < 1 or int(m) != m:\n raise ValueError('expected a positive integer m')\n if not -1 < t < 1:\n raise ValueError('expected -1 < t < 1')\n R = np.asarray(R)\n if len(R.shape) != 2 or R.shape[0] != R.shape[1]:\n raise ValueError('expected an upper triangular square matrix')\n n, n = R.shape\n ident = np.identity(n)\n Y = R * _fractional_power_pade_constant(2 * m, t)\n for j in range(2 * m - 1, 0, -1):\n rhs = R * _fractional_power_pade_constant(j, t)\n Y = solve_triangular(ident + Y, rhs)\n U = ident + Y\n if not np.array_equal(U, np.triu(U)):\n raise Exception('U is not upper triangular')\n return U", + "docstring": "Evaluate the Pade approximation of a fractional matrix power. Evaluate the degree-m Pade approximation of R to the fractional matrix power t using the continued fraction in bottom-up fashion using algorithm (4.1) in [1]_. Parameters ---------- R : (N, N) array_like Upper triangular matrix whose fractional power to evaluate. t : float Fractional power between -1 and 1 exclusive. m : positive integer Degree of Pade approximation. Returns ------- U : (N, N) array_like The degree-m Pade approximation of R to the fractional power t. This matrix will be upper triangular. References ---------- .. [1] Nicholas J. Higham and Lijing lin (2011) \"A Schur-Pade Algorithm for Fractional Powers of a Matrix.\" SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_matfuncs_inv_ssq.py", + "ast_data": "FunctionDef name:_fractional_power_pade arg:R arg:t arg:m arguments arg arg arg If BoolOp Compare Compare Call Raise Call If Compare Raise Call Assign Call If BoolOp Compare Call Compare Raise Call Assign Assign Call Assign Call For Call Assign Call Assign Call Assign If Call Call Raise Call Return return:yes" + }, + { + "library": "django", + "name": "_checkindex", + "source_code": "def _checkindex(self, index):\n if not 0 <= index < self.size:\n raise IndexError('invalid GEOS Geometry index: %s' % index)", + "docstring": "Check the given index.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py", + "ast_data": "FunctionDef name:_checkindex arg:self arg:index arguments arg arg If Compare Raise Call" + }, + { + "library": "pytorch", + "name": "_is_quantized_op_pt2e", + "source_code": "def _is_quantized_op_pt2e(node: torch.fx.Node):\n if not _is_any_annotated([node]):\n return False\n quantization_annotation = node.meta.get(QUANT_ANNOTATION_KEY, None)\n assert isinstance(quantization_annotation, _X86InductorQuantizationAnnotation)\n return quantization_annotation._is_output_of_quantized_pattern", + "docstring": "Used for pt2e flow to check if the node is a quantized node: Case1: the node has been annotated as output node of a fusion pattern. 
Case2: the node has been annotated as single quantized node.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py", + "ast_data": "FunctionDef name:_is_quantized_op_pt2e arg:node arguments arg If Call Return return:yes Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_can_use_numexpr", + "source_code": "def _can_use_numexpr(op, op_str, left_op, right_op, dtype_check) -> bool:\n if op_str is not None:\n if left_op.size > _MIN_ELEMENTS:\n dtypes: set[str] = set()\n for o in [left_op, right_op]:\n if hasattr(o, 'dtype'):\n dtypes |= {o.dtype.name}\n if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:\n return True\n return False", + "docstring": "return left_op boolean if we WILL be using numexpr", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\expressions.py", + "ast_data": "FunctionDef name:_can_use_numexpr arg:op arg:op_str arg:left_op arg:right_op arg:dtype_check arguments arg arg arg arg arg If Compare If Compare Call For If Call If BoolOp Call Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "convert_to_tensor_or_composite", + "source_code": "def convert_to_tensor_or_composite(value, dtype=None, name=None) -> Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor]:\n return internal_convert_to_tensor_or_composite(value=value, dtype=dtype, name=name, as_ref=False)", + "docstring": "Converts the given object to a or . If is a it is returned unmodified. Otherwise, it is converted to a using . Args: value: A or an object that can be consumed by . dtype: (Optional.) The required of the returned or . name: (Optional.) A name to use if a new is created. Returns: A or , based on . Raises: ValueError: If does not match the element type of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:convert_to_tensor_or_composite arg:value arg:dtype arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "bessel_i0", + "source_code": "@tf_export('math.bessel_i0', 'math.special.bessel_i0')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_i0(x, name=None):\n with ops.name_scope(name, 'bessel_i0', [x]):\n return gen_special_math_ops.bessel_i0(x)", + "docstring": "Computes the Bessel i0 function of element-wise. Modified Bessel function of order 0. It is preferable to use the numerically stabler function instead. >>> tf.math.special.bessel_i0([-1., -0.5, 0.5, 1.]).numpy() array([1.26606588, 1.06348337, 1.06348337, 1.26606588], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . 
@compatibility(scipy) Equivalent to scipy.special.i0 @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", + "ast_data": "FunctionDef name:bessel_i0 arg:x arg:name arguments arg arg With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_tensor_from_tensor_info", + "source_code": "@tf_export(v1=['saved_model.get_tensor_from_tensor_info', 'saved_model.utils.get_tensor_from_tensor_info'])\n@deprecation.deprecated(None, _DEPRECATION_MSG)\ndef get_tensor_from_tensor_info(tensor_info, graph=None, import_scope=None):\n graph = graph or ops.get_default_graph()\n\n def _get_tensor(name):\n return graph.get_tensor_by_name(ops.prepend_name_scope(name, import_scope=import_scope))\n encoding = tensor_info.WhichOneof('encoding')\n if encoding == 'name':\n return _get_tensor(tensor_info.name)\n elif encoding == 'coo_sparse':\n return sparse_tensor.SparseTensor(_get_tensor(tensor_info.coo_sparse.indices_tensor_name), _get_tensor(tensor_info.coo_sparse.values_tensor_name), _get_tensor(tensor_info.coo_sparse.dense_shape_tensor_name))\n elif encoding == 'composite_tensor':\n spec_proto = struct_pb2.StructuredValue(type_spec_value=tensor_info.composite_tensor.type_spec)\n spec = nested_structure_coder.decode_proto(spec_proto)\n components = [_get_tensor(component.name) for component in tensor_info.composite_tensor.components]\n return nest.pack_sequence_as(spec, components, expand_composites=True)\n else:\n raise ValueError(f'Invalid TensorInfo.encoding: {encoding}. Expected `coo_sparse`, `composite_tensor`, or `name` for a dense tensor.')", + "docstring": "Returns the Tensor or CompositeTensor described by a TensorInfo proto. Args: tensor_info: A TensorInfo proto describing a Tensor or SparseTensor or CompositeTensor. graph: The tf.Graph in which tensors are looked up. If None, the current default graph is used. import_scope: If not None, names in are prefixed with this string before lookup. Returns: The Tensor or SparseTensor or CompositeTensor in described by . Raises: KeyError: If does not correspond to a tensor in . 
ValueError: If is malformed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\utils_impl.py", + "ast_data": "FunctionDef name:get_tensor_from_tensor_info arg:tensor_info arg:graph arg:import_scope arguments arg arg arg Assign BoolOp Call FunctionDef name:_get_tensor arg:name arguments arg Return return:yes Call Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Call Call Call If Compare Assign Call Assign Call Assign Call Return return:yes Call Raise Call Call Call" + }, + { + "library": "tensorflow", + "name": "replicated", + "source_code": "@classmethod\ndef replicated(cls, mesh: Mesh, rank: int) -> 'Layout':\n return cls._new_object(mesh=mesh, rank=rank)", + "docstring": "Returns a replicated layout of rank .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:replicated arg:cls arg:mesh arg:rank arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_find_library", + "source_code": "def _find_library(base_paths, library_name, required_version):\n if _is_windows():\n filepattern = library_name + '.lib'\n elif _is_macos():\n filepattern = '%s*.dylib' % '.'.join(['lib' + library_name] + required_version.split('.')[:1])\n else:\n filepattern = '.'.join(['lib' + library_name, 'so'] + required_version.split('.')[:1]) + '*'\n return _find_file(base_paths, _library_paths(), filepattern)", + "docstring": "Returns first valid path to the requested library.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py", + "ast_data": "FunctionDef name:_find_library arg:base_paths arg:library_name arg:required_version arguments arg arg arg If Call Assign If Call Assign Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "select_related_descend", + "source_code": "def select_related_descend(field, restricted, requested, select_mask):\n if not field.remote_field:\n return False\n if getattr(field.remote_field, 'parent_link', False):\n return False\n if not restricted:\n return not field.null\n if field.name not in requested:\n return False\n if select_mask and field not in select_mask:\n raise FieldError(f'Field {field.model._meta.object_name}.{field.name} cannot be both deferred and traversed using select_related at the same time.')\n return True", + "docstring": "Return whether should be used to descend deeper for purposes. Arguments: * - the field to be checked. Can be either a or instance. * - a boolean field, indicating if the field list has been manually restricted using a select_related() clause. * - the select_related() dictionary. * - the dictionary of selected fields.", + "type": "function", + "file_path": "django\\django\\db\\models\\query_utils.py", + "ast_data": "FunctionDef name:select_related_descend arg:field arg:restricted arg:requested arg:select_mask arguments arg arg arg arg If Return return:yes If Call Return return:yes If Return return:yes If Compare Return return:yes If BoolOp Compare Raise Call Return return:yes" + }, + { + "library": "scrapy", + "name": "send_catch_log_deferred", + "source_code": "def send_catch_log_deferred(self, signal: Any, **kwargs: Any) -> Deferred[list[tuple[Any, Any]]]:\n kwargs.setdefault('sender', self.sender)\n return _signal.send_catch_log_deferred(signal, **kwargs)", + "docstring": "Like :meth: but supports :ref:. 
Returns a Deferred that gets fired once all signal handlers have finished. Send a signal, catch exceptions and log them. The keyword arguments are passed to the signal handlers (connected through the :meth: method).", + "type": "method", + "file_path": "scrapy\\scrapy\\signalmanager.py", + "ast_data": "FunctionDef name:send_catch_log_deferred arg:self arg:signal arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "SparseKerasTensor", + "source_code": "class SparseKerasTensor(KerasTensor):\n\n def _to_placeholder(self):\n spec = self.type_spec\n return array_ops.sparse_placeholder(dtype=spec.dtype, shape=spec.shape)", + "docstring": "A specialized KerasTensor representation for s. Specifically, it specializes the conversion to a placeholder in order to maintain dense shape information.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", + "ast_data": "ClassDef name:SparseKerasTensor FunctionDef name:_to_placeholder arg:self arguments arg Assign Return return:yes Call" + }, + { + "library": "pandas", + "name": "_gotitem", + "source_code": "def _gotitem(self, key: IndexLabel, ndim: int, subset: DataFrame | Series | None=None) -> DataFrame | Series:\n if subset is None:\n subset = self\n elif subset.ndim == 1:\n return subset\n return subset[key]", + "docstring": "Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_gotitem arg:self arg:key arg:ndim arg:subset arguments arg arg arg arg If Compare Assign If Compare Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "mean_absolute_error", + "source_code": "@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None], 'multioutput': [StrOptions({'raw_values', 'uniform_average'}), 'array-like']}, prefer_skip_nested_validation=True)\ndef mean_absolute_error(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average'):\n xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)\n _, y_true, y_pred, sample_weight, multioutput = _check_reg_targets_with_floating_dtype(y_true, y_pred, sample_weight, multioutput, xp=xp)\n check_consistent_length(y_true, y_pred, sample_weight)\n output_errors = _average(xp.abs(y_pred - y_true), weights=sample_weight, axis=0, xp=xp)\n if isinstance(multioutput, str):\n if multioutput == 'raw_values':\n return output_errors\n elif multioutput == 'uniform_average':\n multioutput = None\n mean_absolute_error = _average(output_errors, weights=multioutput)\n return float(mean_absolute_error)", + "docstring": "Mean absolute error regression loss. The mean absolute error is a non-negative floating point value, where best value is 0.0. Read more in the :ref:. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 
'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or array of floats If multioutput is 'raw_values', then mean absolute error is returned for each output separately. If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned. MAE output is non-negative floating point. The best value is 0.0. Examples -------- >>> from sklearn.metrics import mean_absolute_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> mean_absolute_error(y_true, y_pred) 0.5 >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> mean_absolute_error(y_true, y_pred) 0.75 >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values') array([0.5, 1. ]) >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]) 0.85...", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\_regression.py", + "ast_data": "FunctionDef name:mean_absolute_error arg:y_true arg:y_pred arguments arg arg arg arg Assign Call Assign Call Call Assign Call Call If Call If Compare Return return:yes If Compare Assign Assign Call Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "RegularPolygon", + "source_code": "class RegularPolygon(Patch):\n\n def __str__(self):\n s = 'RegularPolygon((%g, %g), %d, radius=%g, orientation=%g)'\n return s % (self.xy[0], self.xy[1], self.numvertices, self.radius, self.orientation)\n\n @_docstring.interpd\n def __init__(self, xy, numVertices, *, radius=5, orientation=0, **kwargs):\n self.xy = xy\n self.numvertices = numVertices\n self.orientation = orientation\n self.radius = radius\n self._path = Path.unit_regular_polygon(numVertices)\n self._patch_transform = transforms.Affine2D()\n super().__init__(**kwargs)\n\n def get_path(self):\n return self._path\n\n def get_patch_transform(self):\n return self._patch_transform.clear().scale(self.radius).rotate(self.orientation).translate(*self.xy)", + "docstring": "A regular polygon patch.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "ClassDef name:RegularPolygon FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes FunctionDef name:__init__ arg:self arg:xy arg:numVertices arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Call Assign Call Call Call FunctionDef name:get_path arg:self arguments arg Return return:yes FunctionDef name:get_patch_transform arg:self arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "ModeKeyMap", + "source_code": "class ModeKeyMap(collections.abc.Mapping):\n\n def __init__(self, **kwargs):\n self._internal_dict = {}\n self._keys = []\n for key in kwargs:\n self._keys.append(key)\n dict_key = self._get_internal_key(key)\n if dict_key in self._internal_dict:\n raise ValueError('Error creating ModeKeyMap. 
Multiple keys/values found for {} mode.'.format(dict_key))\n self._internal_dict[dict_key] = kwargs[key]\n\n def _get_internal_key(self, key):\n if is_train(key):\n return KerasModeKeys.TRAIN\n if is_eval(key):\n return KerasModeKeys.TEST\n if is_predict(key):\n return KerasModeKeys.PREDICT\n raise ValueError('Invalid mode key: {}.'.format(key))\n\n def __getitem__(self, key):\n return self._internal_dict[self._get_internal_key(key)]\n\n def __iter__(self):\n return iter(self._keys)\n\n def __len__(self):\n return len(self._keys)", + "docstring": "Map using ModeKeys as keys. This class creates an immutable mapping from modes to values. For example, SavedModel export of Keras models use this to map modes to their corresponding MetaGraph tags/SignatureDef keys.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\mode_keys.py", + "ast_data": "ClassDef name:ModeKeyMap FunctionDef name:__init__ arg:self arguments arg arg Assign Assign For Call Assign Call If Compare Raise Call Call Assign FunctionDef name:_get_internal_key arg:self arg:key arguments arg arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Raise Call Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "saving_errors", + "source_code": "@property\ndef saving_errors(self):\n return self._saving_errors", + "docstring": "Returns set of errors preventing this FuncGraph from being saved.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", + "ast_data": "FunctionDef name:saving_errors arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "unitary_group_gen", + "source_code": "class unitary_group_gen(multi_rv_generic):\n\n def __init__(self, seed=None):\n super().__init__(seed)\n self.__doc__ = doccer.docformat(self.__doc__)\n\n def __call__(self, dim=None, seed=None):\n return unitary_group_frozen(dim, seed=seed)\n\n def _process_parameters(self, dim):\n if dim is None or not np.isscalar(dim) or dim < 0 or (dim != int(dim)):\n raise ValueError('Dimension of rotation must be specified,and must be a scalar nonnegative integer.')\n return dim\n\n def rvs(self, dim, size=1, random_state=None):\n random_state = self._get_random_state(random_state)\n size = int(size)\n dim = self._process_parameters(dim)\n size = (size,) if size > 1 else ()\n z = 1 / math.sqrt(2) * (random_state.normal(size=size + (dim, dim)) + 1j * random_state.normal(size=size + (dim, dim)))\n q, r = np.linalg.qr(z)\n d = r.diagonal(offset=0, axis1=-2, axis2=-1)\n q *= (d / abs(d))[..., np.newaxis, :]\n return q", + "docstring": "A matrix-valued U(N) random variable. Return a random unitary matrix. The keyword specifies the dimension N. Methods ------- rvs(dim=None, size=1, random_state=None) Draw random samples from U(N). Parameters ---------- dim : scalar Dimension of matrices. seed : {None, int, np.random.RandomState, np.random.Generator}, optional Used for drawing random variates. If is , the singleton is used. 
If is an int, a new `seedNoneortho_groupmath-ph/0609050v2dim` parameter, return a \"frozen\" unitary_group random variable: >>> rv = unitary_group(5) See Also -------- ortho_group", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "ClassDef name:unitary_group_gen FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call Assign Call FunctionDef name:__call__ arg:self arg:dim arg:seed arguments arg arg arg Return return:yes Call FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes FunctionDef name:rvs arg:self arg:dim arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Compare Assign Call Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "RidgeClassifier", + "source_code": "class RidgeClassifier(_RidgeClassifierMixin, _BaseRidge):\n _parameter_constraints: dict = {**_BaseRidge._parameter_constraints, 'class_weight': [dict, StrOptions({'balanced'}), None]}\n\n def __init__(self, alpha=1.0, *, fit_intercept=True, copy_X=True, max_iter=None, tol=0.0001, class_weight=None, solver='auto', positive=False, random_state=None):\n super().__init__(alpha=alpha, fit_intercept=fit_intercept, copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver, positive=positive, random_state=random_state)\n self.class_weight = class_weight\n\n @_fit_context(prefer_skip_nested_validation=True)\n def fit(self, X, y, sample_weight=None):\n X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver)\n super().fit(X, Y, sample_weight=sample_weight)\n return self\n\n def __sklearn_tags__(self):\n tags = super().__sklearn_tags__()\n tags.input_tags.sparse = self.solver != 'svd' and (self.solver != 'cholesky' or not self.fit_intercept)\n return tags", + "docstring": "Classifier using Ridge regression. This classifier first converts the target values into `User Guide ~sklearn.linear_model.LogisticRegression~sklearn.svm.LinearSVCcoef_toltoltoltoltoltoltoltolmax_iterscipy.optimize.minimizepositiveGlossary fitn_features_in_fitX` has feature names that are all strings. .. versionadded:: 1.0 solver_ : str The solver that was used at fit time by the computational routines. .. versionadded:: 1.5 See Also -------- Ridge : Ridge regression. RidgeClassifierCV : Ridge classifier with built-in cross validation. Notes ----- For multi-class classification, n_class classifiers are trained in a one-versus-all approach. Concretely, this is implemented by taking advantage of the multi-variate response support in Ridge. 
Examples -------- >>> from sklearn.datasets import load_breast_cancer >>> from sklearn.linear_model import RidgeClassifier >>> X, y = load_breast_cancer(return_X_y=True) >>> clf = RidgeClassifier().fit(X, y) >>> clf.score(X, y) 0.9595...", + "type": "class", + "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py", + "ast_data": "ClassDef name:RidgeClassifier Call FunctionDef name:__init__ arg:self arg:alpha arguments arg arg arg arg arg arg arg arg arg arg Call Call Assign FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Call Call Return return:yes Call FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign BoolOp Compare BoolOp Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "conv2d_backprop_filter", + "source_code": "@tf_export(v1=['nn.conv2d_backprop_filter'])\n@dispatch.add_dispatch_support\ndef conv2d_backprop_filter(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, data_format='NHWC', dilations=[1, 1, 1, 1], name=None):\n padding, explicit_paddings = convert_padding(padding)\n return gen_nn_ops.conv2d_backprop_filter(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name)", + "docstring": "Computes the gradients of convolution with respect to the filter. Args: input: A . Must be one of the following types: , , , . 4-D with shape . filter_sizes: A of type . An integer vector representing the tensor shape of , where is a 4-D tensor. out_backprop: A . Must have the same type as . 4-D with shape . Gradients w.r.t. the output of the convolution. strides: A list of . The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: Either the or indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is , this should be in the form . When explicit padding used and data_format is , this should be in the form . use_cudnn_on_gpu: An optional . Defaults to . data_format: An optional from: . Defaults to . Specify the data format of the input and output data. With the default format \"NHWC\", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be \"NCHW\", the data storage order of: [batch, in_channels, in_height, in_width]. dilations: An optional list of . Defaults to . 1-D tensor of length 4. The dilation factor for each dimension of . If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of , see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A . 
Has the same type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:conv2d_backprop_filter arg:input arg:filter_sizes arg:out_backprop arg:strides arg:padding arg:use_cudnn_on_gpu arg:data_format arg:dilations arg:name arguments arg arg arg arg arg arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "AddValue", + "source_code": "def AddValue(self, val):\n result = val\n new_value = val.name not in self._values\n new_value &= val.op._control_flow_context is not self\n if new_value:\n self._values.add(val.name)\n grad_ctxt = ops.get_default_graph()._get_control_flow_context()\n if grad_ctxt:\n grad_ctxt = grad_ctxt.GetWhileContext()\n if grad_ctxt.grad_state:\n forward_ctxt = util.GetWhileContext(val.op)\n if util.IsLoopExit(val.op):\n forward_ctxt = forward_ctxt.outer_context\n if forward_ctxt:\n forward_ctxt = forward_ctxt.GetWhileContext()\n if forward_ctxt == grad_ctxt.grad_state.forward_context:\n real_val = grad_ctxt.grad_state.GetRealValue(val)\n self._external_values[val.name] = real_val\n return real_val\n if self._outer_context is not None:\n result = self._outer_context.AddValue(val)\n with ops.control_dependencies(None):\n enter = _Enter(result, self._name, is_constant=True, parallel_iterations=self._parallel_iterations)\n enter.graph.prevent_feeding(enter)\n if self._outer_context:\n self._outer_context.AddInnerOp(enter.op)\n self._FixControlInputsAndContext([enter])\n self._values.add(enter.name)\n self._external_values[val.name] = enter\n result = enter\n else:\n actual_val = self._external_values.get(val.name)\n if actual_val is not None:\n result = actual_val\n return result", + "docstring": "Add to the current context and its outer context recursively.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:AddValue arg:self arg:val arguments arg arg Assign Assign Compare Compare If Call Assign Call Call If Assign Call If Assign Call If Call Assign If Assign Call If Compare Assign Call Assign Return return:yes If Compare Assign Call With Call Assign Call Call If Call Call Call Assign Assign Assign Call If Compare Assign Return return:yes" + }, + { + "library": "authlib", + "name": "create_query_client_func", + "source_code": "def create_query_client_func(session, client_model):\n\n def query_client(client_id):\n q = session.query(client_model)\n return q.filter_by(client_id=client_id).first()\n return query_client", + "docstring": "Create an `` function that can be used in authorization server. 
:param session: SQLAlchemy session :param client_model: Client model class", + "type": "function", + "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\functions.py", + "ast_data": "FunctionDef name:create_query_client_func arg:session arg:client_model arguments arg arg FunctionDef name:query_client arg:client_id arguments arg Assign Call Return return:yes Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "UnknownFortranRoutine", + "source_code": "class UnknownFortranRoutine(FortranRoutine):\n type = 'unknown'\n\n def __init__(self, name):\n FortranRoutine.__init__(self, name=name, filename='')\n\n def dependencies(self):\n return []", + "docstring": "Wrapper for a Fortran routine for which the corresponding file is not known.", + "type": "class", + "file_path": "numpy\\numpy\\linalg\\lapack_lite\\make_lite.py", + "ast_data": "ClassDef name:UnknownFortranRoutine Assign FunctionDef name:__init__ arg:self arg:name arguments arg arg Call FunctionDef name:dependencies arg:self arguments arg Return return:no" + }, + { + "library": "seaborn", + "name": "_kde_support", + "source_code": "def _kde_support(data, bw, gridsize, cut, clip):\n support_min = max(data.min() - bw * cut, clip[0])\n support_max = min(data.max() + bw * cut, clip[1])\n support = np.linspace(support_min, support_max, gridsize)\n return support", + "docstring": "Establish support for a kernel density estimate.", + "type": "function", + "file_path": "seaborn\\seaborn\\utils.py", + "ast_data": "FunctionDef name:_kde_support arg:data arg:bw arg:gridsize arg:cut arg:clip arguments arg arg arg arg arg Assign Call Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_boolrelextrema", + "source_code": "def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):\n if int(order) != order or order < 1:\n raise ValueError('Order must be an int >= 1')\n datalen = data.shape[axis]\n locs = np.arange(0, datalen)\n results = np.ones(data.shape, dtype=bool)\n main = data.take(locs, axis=axis, mode=mode)\n for shift in range(1, order + 1):\n plus = data.take(locs + shift, axis=axis, mode=mode)\n minus = data.take(locs - shift, axis=axis, mode=mode)\n results &= comparator(main, plus)\n results &= comparator(main, minus)\n if ~results.any():\n return results\n return results", + "docstring": "Calculate the relative extrema of . Relative extrema are calculated by finding locations where `datadata` that is True at an extrema, False otherwise. See also -------- argrelmax, argrelmin Examples -------- >>> import numpy as np >>> from scipy.signal._peak_finding import _boolrelextrema >>> testdata = np.array([1,2,3,2,1]) >>> _boolrelextrema(testdata, np.greater, axis=0) array([False, False, True, False, False], dtype=bool)", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_peak_finding.py", + "ast_data": "FunctionDef name:_boolrelextrema arg:data arg:comparator arg:axis arg:order arg:mode arguments arg arg arg arg arg If BoolOp Compare Call Compare Raise Call Assign Assign Call Assign Call Assign Call For Call Assign Call Assign Call Call Call If Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "iat", + "source_code": "@property\ndef iat(self) -> _iAtIndexer:\n return _iAtIndexer('iat', self)", + "docstring": "Access a single value for a row/column pair by integer position. Similar to `` if you only need to get or set a single value in a DataFrame or Series. Raises ------ IndexError When integer position is out of bounds. 
See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.loc : Access a group of rows and columns by label(s). DataFrame.iloc : Access a group of rows and columns by integer position(s). Examples -------- >>> df = pd.DataFrame( ... [[0, 2, 3], [0, 4, 1], [10, 20, 30]], columns=[\"A\", \"B\", \"C\"] ... ) >>> df A B C 0 0 2 3 1 0 4 1 2 10 20 30 Get value at specified row/column pair >>> df.iat[1, 2] np.int64(1) Set value at specified row/column pair >>> df.iat[1, 2] = 10 >>> df.iat[1, 2] np.int64(10) Get value within a series >>> df.loc[0].iat[1] np.int64(2)", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:iat arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "get_dated_items", + "source_code": "def get_dated_items(self):\n raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')", + "docstring": "Obtain the list of dates and items.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:get_dated_items arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_log_signature_report", + "source_code": "def _log_signature_report(signature_def_map, excluded_signatures):\n sig_names_by_method_name = collections.defaultdict(list)\n for method_name in _FRIENDLY_METHOD_NAMES:\n sig_names_by_method_name[method_name] = []\n for signature_name, sig in signature_def_map.items():\n sig_names_by_method_name[sig.method_name].append(signature_name)\n for method_name, sig_names in sig_names_by_method_name.items():\n if method_name in _FRIENDLY_METHOD_NAMES:\n method_name = _FRIENDLY_METHOD_NAMES[method_name]\n logging.info('Signatures INCLUDED in export for {}: {}'.format(method_name, sig_names if sig_names else 'None'))\n if excluded_signatures:\n logging.info('Signatures EXCLUDED from export because they cannot be be served via TensorFlow Serving APIs:')\n for signature_name, message in excluded_signatures.items():\n logging.info(\"'{}' : {}\".format(signature_name, message))\n if not signature_def_map:\n logging.warn('Export includes no signatures!')\n elif signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in signature_def_map:\n logging.warn('Export includes no default signature!')", + "docstring": "Log a report of which signatures were produced.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py", + "ast_data": "FunctionDef name:_log_signature_report arg:signature_def_map arg:excluded_signatures arguments arg arg Assign Call For Assign For Call Call For Call If Compare Assign Call Call If Call For Call Call Call If Call If Compare Call" + }, + { + "library": "pytorch", + "name": "wait_stream", + "source_code": "def wait_stream(self, stream) -> None:\n self.wait_event(stream.record_event())", + "docstring": "Synchronize with another stream. All future work submitted to this stream will wait until all kernels submitted to a given stream at the time of call complete. Args: stream (Stream): a stream to synchronize. .. 
note:: This function returns without waiting for currently enqueued kernels in :attr:: only future operations are affected.", + "type": "method", + "file_path": "pytorch\\torch\\cuda\\streams.py", + "ast_data": "FunctionDef name:wait_stream arg:self arg:stream arguments arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "_max_pool_grad_flops", + "source_code": "@ops.RegisterStatistics('MaxPoolGrad', 'flops')\ndef _max_pool_grad_flops(graph, node):\n _verify_conv_data_format(node)\n kernel_shape = list(node.attr['ksize'].list.i)\n kernel_area = _list_product(kernel_shape)\n orig_out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n orig_out_shape.assert_is_fully_defined()\n max_pool_ops = kernel_area * orig_out_shape.num_elements()\n return ops.OpStats('flops', max_pool_ops + orig_out_shape.num_elements())", + "docstring": "Compute flops for MaxPoolGrad operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_max_pool_grad_flops arg:graph arg:node arguments arg arg Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "_quantile_unchecked", + "source_code": "def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, method='linear', keepdims=False, weights=None):\n return _ureduce(a, func=_quantile_ureduce_func, q=q, weights=weights, keepdims=keepdims, axis=axis, out=out, overwrite_input=overwrite_input, method=method)", + "docstring": "Assumes that q is in [0, 1], and is an ndarray", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", + "ast_data": "FunctionDef name:_quantile_unchecked arg:a arg:q arg:axis arg:out arg:overwrite_input arg:method arg:keepdims arg:weights arguments arg arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_bwd_send_ops", + "source_code": "def get_bwd_send_ops(self, bwd_chunk_id: int) -> list[dist.P2POp]:\n self._check_chunk_id(bwd_chunk_id)\n if not self.has_backward or self.is_first:\n return []\n if self.grad_send_info is None:\n self.grad_send_info = self._create_grad_send_info(self.args_recv_info[0])\n ops: list[dist.P2POp] = []\n grads_input = self.bwd_cache.pop(bwd_chunk_id)\n for grad, grad_recv_stage in zip(grads_input, self.grad_send_info):\n if isinstance(grad, torch.Tensor) and grad_recv_stage is not None:\n logger.debug('%s Sending gradient to Stage %s: %s', self.log_prefix, grad_recv_stage, grad.size())\n peer_rank = self.stage_index_to_group_rank[grad_recv_stage]\n peer_global_rank = peer_rank if self.group is None else dist.get_global_rank(self.group, peer_rank)\n ops.append(dist.P2POp(dist.isend, grad, peer_global_rank, self.group))\n elif not (grad is None and grad_recv_stage is None):\n raise RuntimeError(f'[{self.stage_index}] for chunk {bwd_chunk_id} has gradients {grad} and is expecting to send gradients to stage {grad_recv_stage}')\n return ops", + "docstring": "Get the gradient send ops for current stage's backward.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", + "ast_data": "FunctionDef name:get_bwd_send_ops arg:self arg:bwd_chunk_id arguments arg arg Call If BoolOp Return return:no If Compare Assign Call Assign Call For Call If BoolOp Call Compare Call Call Assign Assign Compare Call Call Call If BoolOp Compare Compare Raise Call Return return:yes" + }, + { + "library": "pygame", + "name": 
"__repr__", + "source_code": "def __repr__(self):\n return '<{klass} @{id:x} {attrs}>'.format(klass=self.__class__.__name__, id=id(self) & 16777215, attrs=' '.join((f'{k}={v!r}' for k, v in self.__dict__.items())))", + "docstring": "Turn the class into a string.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "kornia", + "name": "shift_rgb", + "source_code": "def shift_rgb(image: Tensor, r_shift: Tensor, g_shift: Tensor, b_shift: Tensor) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(image)\n KORNIA_CHECK_IS_COLOR(image, f'with shape {image.shape}')\n shifts = [r_shift, g_shift, b_shift]\n shifted = (image + stack(shifts, dim=1).view(-1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\n return shifted", + "docstring": "Shift rgb channels. Shift each image's channel by either r_shift for red, g_shift for green and b_shift for blue channels.", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\shift_rgb.py", + "ast_data": "FunctionDef name:shift_rgb arg:image arg:r_shift arg:g_shift arg:b_shift arguments arg arg arg arg Call Call Assign Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "ragged_to_dense", + "source_code": "def ragged_to_dense(rt_input, default_value=None, shape=None):\n return rt_input.to_tensor(default_value=default_value, shape=shape)", + "docstring": "Create a dense tensor from a ragged tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_conversion_ops.py", + "ast_data": "FunctionDef name:ragged_to_dense arg:rt_input arg:default_value arg:shape arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "update_hash_with_array", + "source_code": "def update_hash_with_array(hash_value, int_array):\n if int_array is not None:\n for i in int_array:\n hash_value = update_hash_with_primitive_value(hash_value, i)\n return hash_value", + "docstring": "Update the hash value using a TFLite int array. Args: hash_value (int): The current hash value. int_array: A TFLite int array to incorporate into the hash. Returns: int: The updated hash value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", + "ast_data": "FunctionDef name:update_hash_with_array arg:hash_value arg:int_array arguments arg arg If Compare For Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, widthA=1.0, lengthA=0.2, angleA=None):\n super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA)", + "docstring": "Parameters ---------- widthA : float, default: 1.0 Width of the bracket. lengthA : float, default: 0.2 Length of the bracket. angleA : float, default: 0 degrees Orientation of the bracket, as a counterclockwise angle. 
0 degrees means perpendicular to the line.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:widthA arg:lengthA arg:angleA arguments arg arg arg arg Call Call" + }, + { + "library": "authlib", + "name": "validate_id_token_encrypted_response_enc", + "source_code": "def validate_id_token_encrypted_response_enc(self):\n if self.get('id_token_encrypted_response_enc') and (not self.get('id_token_encrypted_response_alg')):\n raise InvalidClaimError('id_token_encrypted_response_enc')\n if self.get('id_token_encrypted_response_alg'):\n self.setdefault('id_token_encrypted_response_enc', 'A128CBC-HS256')\n self._validate_claim_value('id_token_encrypted_response_enc')", + "docstring": "JWE enc algorithm [JWA] REQUIRED for encrypting the ID Token issued to this Client. If id_token_encrypted_response_alg is specified, the default id_token_encrypted_response_enc value is A128CBC-HS256. When id_token_encrypted_response_enc is included, id_token_encrypted_response_alg MUST also be provided.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", + "ast_data": "FunctionDef name:validate_id_token_encrypted_response_enc arg:self arguments arg If BoolOp Call Call Raise Call If Call Call Call" + }, + { + "library": "pytorch", + "name": "replace_autocast_with_hop_pass", + "source_code": "def replace_autocast_with_hop_pass(gm: torch.fx.GraphModule, graph_signature: Optional[ExportGraphSignature]) -> tuple[torch.fx.GraphModule, Optional[ExportGraphSignature]]:\n return _replace_with_hop_pass_helper(gm, graph_signature, _sequential_split_and_maybe_inline_subgraphs)", + "docstring": "Split gm into sub-graph-modules using , and then recursively call itself on each of the submodules.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\passes\\replace_autocast_with_hop_pass.py", + "ast_data": "FunctionDef name:replace_autocast_with_hop_pass arg:gm arg:graph_signature arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_predict_proba", + "source_code": "def _predict_proba(lr, X):\n pred = safe_sparse_dot(X, lr.coef_.T)\n if hasattr(lr, 'intercept_'):\n pred += lr.intercept_\n return softmax(pred)", + "docstring": "Predict proba for lightning for n_classes >=3.", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_saga.py", + "ast_data": "FunctionDef name:_predict_proba arg:lr arg:X arguments arg arg Assign Call If Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ROCmKernel", + "source_code": "class ROCmKernel(Kernel):\n overrides = OpOverrides", + "docstring": "Baseclass for ROCm based Kernels", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\rocm_kernel.py", + "ast_data": "ClassDef name:ROCmKernel Assign" + }, + { + "library": "pytorch", + "name": "get_detector_name", + "source_code": "def get_detector_name(self) -> str:\n return 'outlier_detector'", + "docstring": "Returns the name of this detector", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:get_detector_name arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "hand_clean_DELETE", + "source_code": "def hand_clean_DELETE(self):\n if self.cleaned_data.get(DELETION_FIELD_NAME, False):\n using = router.db_for_write(self._meta.model)\n collector = NestedObjects(using=using)\n if self.instance._state.adding:\n 
return\n collector.collect([self.instance])\n if collector.protected:\n objs = []\n for p in collector.protected:\n objs.append(_('%(class_name)s %(instance)s') % {'class_name': p._meta.verbose_name, 'instance': p})\n params = {'class_name': self._meta.model._meta.verbose_name, 'instance': self.instance, 'related_objects': get_text_list(objs, _('and'))}\n msg = _('Deleting %(class_name)s %(instance)s would require deleting the following protected related objects: %(related_objects)s')\n raise ValidationError(msg, code='deleting_protected', params=params)", + "docstring": "We don't validate the 'DELETE' field itself because on templates it's not rendered using the field information, but just using a generic \"deletion_field\" of the InlineModelAdmin.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:hand_clean_DELETE arg:self arguments arg If Call Assign Call Assign Call If Return return:no Call If Assign For Call Call Assign Call Call Assign Call Raise Call" + }, + { + "library": "django", + "name": "intersection", + "source_code": "def intersection(self, other):\n return self._topology(capi.geos_intersection(self.ptr, other.ptr))", + "docstring": "Return a Geometry representing the points shared by this Geometry and other.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:intersection arg:self arg:other arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "tree_leaves_with_path", + "source_code": "def tree_leaves_with_path(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> list[tuple[KeyPath, Any]]:\n return list(_generate_key_paths((), tree, is_leaf))", + "docstring": "Gets the leaves of a pytree like `tree_flatten_with_path_fnregister_pytree_nodeTrue`, the whole subtree being treated as a leaf. Otherwise, the default pytree registry will be used to determine a node is a leaf or not. If the function is not specified, the default pytree registry will be used. 
Returns: A list of (key path, leaf) pairs.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_pytree.py", + "ast_data": "FunctionDef name:tree_leaves_with_path arg:tree arg:is_leaf arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "to_dict", + "source_code": "def to_dict(self) -> dict[str, Union[int, float, str, list[int], list[float], list[str]]]:\n attrs: dict[str, Union[int, float, str, list[int], list[float], list[str]]] = {}\n for i, key in enumerate(self.attr_keys):\n attr_type = self.attr_types[i]\n if attr_type == _INT_TYPE:\n attrs[key] = self.attr_ints[self.attr_pos[i][0]]\n elif attr_type == _FLOAT_TYPE:\n attrs[key] = self.attr_floats[self.attr_pos[i][0]]\n elif attr_type == _STRING_TYPE:\n attrs[key] = self.attr_strs[self.attr_pos[i][0]]\n elif attr_type == _FLOAT_SEQ_TYPE:\n attrs[key] = self.attr_floats[self.attr_pos[i][0]:self.attr_pos[i][1]]\n elif attr_type == _INT_SEQ_TYPE:\n attrs[key] = self.attr_ints[self.attr_pos[i][0]:self.attr_pos[i][1]]\n elif attr_type == _STRING_SEQ_TYPE:\n attrs[key] = self.attr_strs[self.attr_pos[i][0]:self.attr_pos[i][1]]\n else:\n raise ValueError(f'Unsupported attribute type: {attr_type}')\n return attrs", + "docstring": "Convert the encoded attributes back to a dictionary for creating an ONNX node.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\ops\\_symbolic_impl.py", + "ast_data": "FunctionDef name:to_dict arg:self arguments arg For Call Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "shape_from_header", + "source_code": "def shape_from_header(self, hdr):\n mclass = hdr.mclass\n if mclass == mxFULL_CLASS:\n shape = tuple(map(int, hdr.dims))\n elif mclass == mxCHAR_CLASS:\n shape = tuple(map(int, hdr.dims))\n if self.chars_as_strings:\n shape = shape[:-1]\n elif mclass == mxSPARSE_CLASS:\n dt = hdr.dtype\n dims = hdr.dims\n if not (len(dims) == 2 and dims[0] >= 1 and (dims[1] >= 1)):\n return ()\n self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)\n rows = np.ndarray(shape=(), dtype=dt, buffer=self.mat_stream.read(dt.itemsize))\n self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)\n cols = np.ndarray(shape=(), dtype=dt, buffer=self.mat_stream.read(dt.itemsize))\n shape = (int(rows), int(cols))\n else:\n raise TypeError(f'No reader for class code {mclass}')\n if self.squeeze_me:\n shape = tuple([x for x in shape if x != 1])\n return shape", + "docstring": "Read the shape of the array described by the header. The file position after this call is unspecified.", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", + "ast_data": "FunctionDef name:shape_from_header arg:self arg:hdr arguments arg arg Assign If Compare Assign Call Call If Compare Assign Call Call If Assign If Compare Assign Assign If BoolOp Compare Call Compare Compare Return return:no Call Assign Call Call Call Assign Call Call Assign Call Call Raise Call If Assign Call Compare Return return:yes" + }, + { + "library": "scipy", + "name": "saltelli_2010", + "source_code": "def saltelli_2010(f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray) -> tuple[np.ndarray, np.ndarray]:\n var = np.var([f_A, f_B], axis=(0, -1))\n s = np.mean(f_B * (f_AB - f_A), axis=-1) / var\n st = 0.5 * np.mean((f_A - f_AB) ** 2, axis=-1) / var\n return (s.T, st.T)", + "docstring": "Saltelli2010 formulation. .. 
math:: S_i = \\frac{1}{N} \\sum_{j=1}^N f(\\mathbf{B})_j (f(\\mathbf{AB}^{(i)})_j - f(\\mathbf{A})_j) .. math:: S_{T_i} = \\frac{1}{N} \\sum_{j=1}^N (f(\\mathbf{A})_j - f(\\mathbf{AB}^{(i)})_j)^2 Parameters ---------- f_A, f_B : array_like (s, n) Function values at A and B, respectively f_AB : array_like (d, s, n) Function values at each of the AB pages Returns ------- s, st : array_like (s, d) First order and total order Sobol' indices. References ---------- .. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and S. Tarantola. \"Variance based sensitivity analysis of model output. Design and estimator for the total sensitivity index.\" Computer Physics Communications, 181(2):259-270, :doi:, 2010.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_sensitivity_analysis.py", + "ast_data": "FunctionDef name:saltelli_2010 arg:f_A arg:f_B arg:f_AB arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "set_growth_factor", + "source_code": "def set_growth_factor(self, new_factor: float) -> None:\n self._growth_factor = new_factor", + "docstring": "Set a new scale growth factor. Args: new_factor (float): Value to use as the new scale growth factor.", + "type": "method", + "file_path": "pytorch\\torch\\amp\\grad_scaler.py", + "ast_data": "FunctionDef name:set_growth_factor arg:self arg:new_factor arguments arg arg Assign" + }, + { + "library": "pytorch", + "name": "_free_unsharded_flat_param", + "source_code": "def _free_unsharded_flat_param(self):\n self._check_sharded_strategy()\n unsharded_flat_param = self._get_padded_unsharded_flat_param()\n self._check_on_compute_device(unsharded_flat_param)\n _no_dispatch_record_stream(unsharded_flat_param, self._device_handle.current_stream())\n _free_storage(unsharded_flat_param)", + "docstring": "Free the padded unsharded flat parameter. We allow this function to be called even when storage is not allocated. The tensor to free depends on the calling context since the unshard may have forced full precision, in which case a different tensor is used.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:_free_unsharded_flat_param arg:self arguments arg Call Assign Call Call Call Call Call" + }, + { + "library": "kornia", + "name": "Vflip", + "source_code": "class Vflip(Module):\n\n def forward(self, input: Tensor) -> Tensor:\n return vflip(input)\n\n def __repr__(self) -> str:\n return self.__class__.__name__", + "docstring": "Vertically flip a tensor image or a batch of tensor images. Input must be a tensor of shape (C, H, W) or a batch of tensors :math:. Args: input: input tensor. Returns: The vertically flipped image tensor. Examples: >>> vflip = Vflip() >>> input = torch.tensor([[[ ... [0., 0., 0.], ... [0., 0., 0.], ... [0., 1., 1.] ... 
]]]) >>> vflip(input) tensor([[[[0., 1., 1.], [0., 0., 0.], [0., 0., 0.]]]])", + "type": "class", + "file_path": "kornia\\kornia\\geometry\\transform\\flips.py", + "ast_data": "ClassDef name:Vflip FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_WrappedTritonKernel", + "source_code": "class _WrappedTritonKernel:\n\n def __init__(self, kernel):\n self.kernel = kernel\n self.kernel_invoked = False\n\n def __call__(self, *args, **kwargs):\n res = self.kernel(*args, **kwargs)\n self.kernel_invoked = True\n return res", + "docstring": "Just a simple wrapper to store some metadata for testing purposes.", + "type": "class", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "ClassDef name:_WrappedTritonKernel FunctionDef name:__init__ arg:self arg:kernel arguments arg arg Assign Assign FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "generate_equalization_qconfig", + "source_code": "def generate_equalization_qconfig(self) -> EqualizationQConfig:\n return default_equalization_qconfig", + "docstring": "This returns the equalization configuration for a module. For now, it just returns the default, but as more equalization options become possible, this method can get more fleshed out with more nuanced granularity. Returns the generated equalization QConfig according to what a valid configuration is", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:generate_equalization_qconfig arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "MonthArchiveView", + "source_code": "class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):\n template_name_suffix = '_archive_month'", + "docstring": "List of objects published in a given month.", + "type": "class", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "ClassDef name:MonthArchiveView Assign" + }, + { + "library": "tensorflow", + "name": "state", + "source_code": "@property\ndef state(self):\n return self._state_var", + "docstring": "The internal state of the RNG.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:state arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "PackagingErrorReason", + "source_code": "class PackagingErrorReason(Enum):\n\n def __repr__(self):\n return f'<{self.__class__.__name__}.{self.name}>'\n IS_EXTENSION_MODULE = 'Module is a C extension module. torch.package supports Python modules only.'\n NO_DUNDER_FILE = 'Module had no __file__ defined.'\n SOURCE_FILE_NOT_FOUND = 'Module had a __file__, but we could not find it in your filesystem.'\n DEPENDENCY_RESOLUTION_FAILED = 'Dependency resolution failed.'\n NO_ACTION = 'Module did not match against any action pattern. Extern, mock, or intern it.'\n DENIED = 'Module was denied by a pattern.'\n MOCKED_BUT_STILL_USED = 'Module was mocked out, but is still being used in the package. Please intern or extern the mocked modules if objects are supposed to be in the package.'", + "docstring": "Listing of different reasons a dependency may fail to package. 
This enum is used to provide good error messages when :class: is raised.", + "type": "class", + "file_path": "pytorch\\torch\\package\\package_exporter.py", + "ast_data": "ClassDef name:PackagingErrorReason FunctionDef name:__repr__ arg:self arguments arg Return return:yes Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "map_fn", + "source_code": "def map_fn(*columns):\n features = collections.OrderedDict(zip(column_names, columns))\n if label_name is not None:\n label = features.pop(label_name)\n return (features, label)\n return features", + "docstring": "Organizes columns into a features dictionary. Args: *columns: list of s corresponding to one csv record. Returns: An OrderedDict of feature names to values for that particular record. If label_name is provided, extracts the label feature to be returned as the second element of the tuple.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py", + "ast_data": "FunctionDef name:map_fn arguments arg Assign Call Call If Compare Assign Call Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "block", + "source_code": "@array_function_dispatch(_block_dispatcher)\ndef block(arrays):\n arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)\n if list_ndim * final_size > 2 * 512 * 512:\n return _block_slicing(arrays, list_ndim, result_ndim)\n else:\n return _block_concatenate(arrays, list_ndim, result_ndim)", + "docstring": "Assemble an nd-array from nested lists of blocks. Blocks in the innermost lists are concatenated (see ) along the last dimension (-1), then these are concatenated along the second-last dimension (-2), and so on until the outermost list is reached. Blocks can be of any dimension, but will not be broadcasted using the normal rules. 
Instead, leading axes of size 1 are inserted, to make `blockblockhstackblockvstackatleast_1datleast_2d`: >>> a = np.array(0) >>> b = np.array([1]) >>> np.block([a]) # atleast_1d(a) array([0]) >>> np.block([b]) # atleast_1d(b) array([1]) >>> np.block([[a]]) # atleast_2d(a) array([[0]]) >>> np.block([[b]]) # atleast_2d(b) array([[1]])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\shape_base.py", + "ast_data": "FunctionDef name:block arg:arrays arguments arg Assign Call If Compare Return return:yes Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "get_result", + "source_code": "def get_result(self, x, flag=_ECONVERGED):\n return (x, self.function_calls, self.iterations, flag)", + "docstring": "Package the result and statistics into a tuple.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_zeros_py.py", + "ast_data": "FunctionDef name:get_result arg:self arg:x arg:flag arguments arg arg arg Return return:yes" + }, + { + "library": "kornia", + "name": "__call__", + "source_code": "def __call__(self, *inputs: Any, input_names_to_handle: Optional[List[Any]]=None, output_type: str='tensor', **kwargs: Any) -> Any:\n if not self._disable_features:\n decorated_forward = self.convert_input_output(input_names_to_handle=input_names_to_handle, output_type=output_type)(super(ImageSequential, self).__call__)\n _output_image = decorated_forward(*inputs, **kwargs)\n if len(inputs) == 1 and isinstance(inputs[0], dict):\n original_keys, in_data_keys, inputs, invalid_data = self._preproc_dict_data(inputs[0])\n else:\n in_data_keys = kwargs.get('data_keys', self.data_keys)\n data_keys = self.transform_op.preproc_datakeys(in_data_keys)\n if len(data_keys) > 1 and DataKey.INPUT in data_keys:\n idx = data_keys.index(DataKey.INPUT)\n if output_type == 'tensor':\n self._output_image = _output_image\n if isinstance(_output_image, dict):\n self._output_image[original_keys[idx]] = _output_image[original_keys[idx]]\n else:\n self._output_image[idx] = _output_image[idx]\n elif isinstance(_output_image, dict):\n self._output_image[original_keys[idx]] = _output_image[original_keys[idx]]\n else:\n self._output_image[idx] = _output_image[idx]\n else:\n self._output_image = _output_image\n else:\n _output_image = super(ImageSequential, self).__call__(*inputs, **kwargs)\n return _output_image", + "docstring": "Overwrite the __call__ function to handle various inputs. Args: inputs: Inputs to operate on. input_names_to_handle: List of input names to convert, if None, handle all inputs. output_type: Desired output type ('tensor', 'numpy', or 'pil'). kwargs: Additional arguments. 
Returns: Callable: Decorated function with converted input and output types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\augment.py", + "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg arg arg If Assign Call Call Call Assign Call If BoolOp Compare Call Call Assign Call Assign Call Assign Call If BoolOp Compare Call Compare Assign Call If Compare Assign If Call Assign Assign If Call Assign Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, iterator):\n self._iterator = iterator", + "docstring": "Initialize a UTF-8 stream encoder instance.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\encoding.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:iterator arguments arg arg Assign" + }, + { + "library": "pandas", + "name": "array_equals", + "source_code": "def array_equals(left: ArrayLike, right: ArrayLike) -> bool:\n if left.dtype != right.dtype:\n return False\n elif isinstance(left, ABCExtensionArray):\n return left.equals(right)\n else:\n return array_equivalent(left, right, dtype_equal=True)", + "docstring": "ExtensionArray-compatible implementation of array_equivalent.", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\missing.py", + "ast_data": "FunctionDef name:array_equals arg:left arg:right arguments arg arg If Compare Return return:yes If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_example_value", + "source_code": "def _get_example_value(node: fx.Node) -> Optional[str]:\n if 'example_value' in node.meta:\n return node.meta['example_value']\n elif 'val' in node.meta:\n return node.meta['val']\n else:\n return None", + "docstring": "Get the example value key for a node, since dynamo uses \"example_value\" while non-strict export uses \"val.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\passes\\runtime_assert.py", + "ast_data": "FunctionDef name:_get_example_value arg:node arguments arg If Compare Return return:yes If Compare Return return:yes Return return:no" + }, + { + "library": "kornia", + "name": "_mean_isotropic_scale_normalize", + "source_code": "def _mean_isotropic_scale_normalize(points: torch.Tensor, eps: float=1e-08) -> Tuple[torch.Tensor, torch.Tensor]:\n KORNIA_CHECK_SHAPE(points, ['B', 'N', 'D'])\n x_mean = torch.mean(points, dim=1, keepdim=True)\n scale = (points - x_mean).norm(dim=-1, p=2).mean(dim=-1)\n D_int = points.shape[-1]\n D_float = torch.tensor(points.shape[-1], dtype=torch.float64, device=points.device)\n scale = torch.sqrt(D_float) / (scale + eps)\n transform = eye_like(D_int + 1, points)\n idxs = arange(D_int, dtype=torch.int64, device=points.device)\n transform[:, idxs, idxs] = transform[:, idxs, idxs] * scale[:, None]\n transform[:, idxs, D_int] = transform[:, idxs, D_int] + -scale[:, None] * x_mean[:, 0, idxs]\n points_norm = transform_points(transform, points)\n return (points_norm, transform)", + "docstring": "Normalize points. Args: points : Tensor containing the points to be normalized with shape :math:. eps : Small value to avoid division by zero error. 
Returns: Tuple containing the normalized points in the shape :math: and the transformation matrix in the shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\calibration\\pnp.py", + "ast_data": "FunctionDef name:_mean_isotropic_scale_normalize arg:points arg:eps arguments arg arg Call Assign Call Assign Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "pygame", + "name": "__init__", + "source_code": "def __init__(self, ratio):\n self.ratio = ratio", + "docstring": "creates a new collide_circle_ratio callable instance The given ratio is expected to be a floating point value used to scale the underlying sprite radius before checking for collisions. When the ratio is ratio=1.0, then it behaves exactly like the collide_circle method.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:ratio arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_raise_untracked_capture_error", + "source_code": "def _raise_untracked_capture_error(function_name, capture, internal_capture=None, node_path=None):\n msg = f\"Tried to export a function which references an 'untracked' resource. TensorFlow objects (e.g. tf.Variable) captured by functions must be 'tracked' by assigning them to an attribute of a tracked object or assigned to an attribute of the main object directly. See the information below:\\n\\tFunction name = {function_name}\"\n if node_path is not None:\n msg += f'\\n\\tPath to Function = {node_path}'\n msg += f'\\n\\tCaptured Tensor = {capture}'\n msg += f'\\n\\t{_get_trackable_parent_error_string(capture)}'\n if internal_capture is not None:\n msg += f'\\n\\tInternal Tensor = {internal_capture}'\n raise AssertionError(msg)", + "docstring": "Raises AssertionError due to being unable to export a function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\saved_model_exported_concrete.py", + "ast_data": "FunctionDef name:_raise_untracked_capture_error arg:function_name arg:capture arg:internal_capture arg:node_path arguments arg arg arg arg Assign If Compare Call If Compare Raise Call" + }, + { + "library": "pygame", + "name": "add_internal", + "source_code": "def add_internal(self, sprite, layer=None):\n self.spritedict[sprite] = None", + "docstring": "For adding a sprite to this group internally. :param sprite: The sprite we are adding. :param layer: the layer to add to, if the group type supports layers", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:add_internal arg:self arg:sprite arg:layer arguments arg arg arg Assign" + }, + { + "library": "pytorch", + "name": "apply", + "source_code": "@classmethod\ndef apply(cls, module, name, mask):\n return super().apply(module, name, mask=mask)", + "docstring": "Add pruning on the fly and reparametrization of a tensor. Adds the forward pre-hook that enables pruning on the fly and the reparametrization of a tensor in terms of the original tensor and the pruning mask. 
Args: module (nn.Module): module containing the tensor to prune name (str): parameter name within `` on which pruning will act.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:apply arg:cls arg:module arg:name arg:mask arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_builtin_constant_ids", + "source_code": "@FunctionIdSet\ndef _builtin_constant_ids() -> dict[int, str]:\n rv = {id(v): f'builtins.{k}' for k, v in builtins.__dict__.items() if not k.startswith('_') and (not callable(v))}\n return rv", + "docstring": "Collects constant builtins by eliminating callable items.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\trace_rules.py", + "ast_data": "FunctionDef name:_builtin_constant_ids arguments Assign Call Call BoolOp Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_dense_tensor", + "source_code": "def get_dense_tensor(self, transformation_cache, state_manager):\n return transformation_cache.get(self, state_manager)", + "docstring": "Returns dense representing numeric feature. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Dense created within .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_RandomStates", + "source_code": "class _RandomStates(_Constraint):\n\n def __init__(self):\n super().__init__()\n self._constraints = [Interval(Integral, 0, 2 ** 32 - 1, closed='both'), _InstancesOf(np.random.RandomState), _NoneConstraint()]\n\n def is_satisfied_by(self, val):\n return any((c.is_satisfied_by(val) for c in self._constraints))\n\n def __str__(self):\n return f'{', '.join([str(c) for c in self._constraints[:-1]])} or {self._constraints[-1]}'", + "docstring": "Constraint representing random states. Convenience class for [Interval(Integral, 0, 2**32 - 1, closed=\"both\"), np.random.RandomState, None]", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py", + "ast_data": "ClassDef name:_RandomStates FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Call Call FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "push_forwardprop_state", + "source_code": "@contextlib.contextmanager\ndef push_forwardprop_state():\n try:\n pywrap_tfe.TFE_Py_ForwardAccumulatorPushState()\n yield\n finally:\n pywrap_tfe.TFE_Py_ForwardAccumulatorPopState()", + "docstring": "Temporarily push or pop transient state for accumulators in the active set. Allows an accumulator which is currently processing an operation to temporarily reset its state. This is useful when building forwardprop versions of functions, where an accumulator will trigger function building and then must process captured symbolic tensors while building it. Without pushing and popping, accumulators ignore operations executed as a direct result of their own jvp computations. 
Yields: None (used for its side effect).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop_util.py", + "ast_data": "FunctionDef name:push_forwardprop_state arguments Try Call Call" + }, + { + "library": "pytorch", + "name": "kwargs", + "source_code": "@property\ndef kwargs(self) -> dict[str, Argument]:\n return self._kwargs", + "docstring": "The dict of keyword arguments to this `Node` docstring for more information. Assignment to this property is allowed. All accounting of uses and users is updated automatically on assignment.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\node.py", + "ast_data": "FunctionDef name:kwargs arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "argstoarray", + "source_code": "def argstoarray(*args):\n if len(args) == 1 and (not isinstance(args[0], ndarray)):\n output = ma.asarray(args[0])\n if output.ndim != 2:\n raise ValueError('The input should be 2D')\n else:\n n = len(args)\n m = max([len(k) for k in args])\n output = ma.array(np.empty((n, m), dtype=float), mask=True)\n for k, v in enumerate(args):\n output[k, :len(v)] = v\n output[np.logical_not(np.isfinite(output._data))] = masked\n return output", + "docstring": "Constructs a 2D array from a group of sequences. Sequences are filled with missing values to match the length of the longest sequence. Parameters ---------- *args : sequences Group of sequences. Returns ------- argstoarray : MaskedArray A ( x ) masked array, where is the number of arguments and the length of the longest argument. Notes ----- has identical behavior, but is called with a sequence of sequences. Examples -------- A 2D masked array constructed from a group of sequences is returned. >>> from scipy.stats.mstats import argstoarray >>> argstoarray([1, 2, 3], [4, 5, 6]) masked_array( data=[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], mask=[[False, False, False], [False, False, False]], fill_value=1e+20) The returned masked array filled with missing values when the lengths of sequences are different. >>> argstoarray([1, 3], [4, 5, 6]) masked_array( data=[[1.0, 3.0, --], [4.0, 5.0, 6.0]], mask=[[False, False, True], [False, False, False]], fill_value=1e+20)", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:argstoarray arguments arg If BoolOp Compare Call Call Assign Call If Compare Raise Call Assign Call Assign Call Call Assign Call Call For Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "make_one_shot_iterator", + "source_code": "@deprecation.deprecated(None, 'This is a deprecated API that should only be used in TF 1 graph mode and legacy TF 2 graph mode available through `tf.compat.v1`. In all other situations -- namely, eager mode and inside `tf.function` -- you can consume dataset elements using `for elem in dataset: ...` or by explicitly creating iterator via `iterator = iter(dataset)` and fetching its elements via `values = next(iterator)`. Furthermore, this API is not available in TF 2. During the transition from TF 1 to TF 2 you can use `tf.compat.v1.data.make_one_shot_iterator(dataset)` to create a TF 1 graph mode style iterator for a dataset created through TF 2 APIs. 
Note that this should be a transient state of your code base as there are in general no guarantees about the interoperability of TF 1 and TF 2 code.')\ndef make_one_shot_iterator(self) -> Union[iterator_ops.Iterator, iterator_ops.OwnedIterator]:\n return self._make_one_shot_iterator()", + "docstring": "Creates an iterator for elements of this dataset. Note: The returned iterator will be initialized automatically. A \"one-shot\" iterator does not currently support re-initialization. For that see . Example: Returns: An for elements of this dataset.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:make_one_shot_iterator arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "set_td_classes", + "source_code": "def set_td_classes(self, classes: DataFrame) -> Styler:\n if not classes.index.is_unique or not classes.columns.is_unique:\n raise KeyError('Classes render only if `classes` has unique index and columns.')\n classes = classes.reindex_like(self.data)\n for r, row_tup in enumerate(classes.itertuples()):\n for c, value in enumerate(row_tup[1:]):\n if not (pd.isna(value) or value == ''):\n self.cell_context[r, c] = str(value)\n return self", + "docstring": "Set the ` HTML elements. See Also -------- Styler.set_table_styles: Set the table styles included within the MultiIndexclassesDataFrame` as a subset of the underlying, >>> df = pd.DataFrame( ... [[1, 2], [3, 4]], ... index=[\"a\", \"b\"], ... columns=[[\"level0\", \"level0\"], [\"level1a\", \"level1b\"]], ... ) >>> classes = pd.DataFrame( ... [\"min-val\"], index=[\"a\"], columns=[[\"level0\"], [\"level1a\"]] ... ) >>> df.style.set_td_classes(classes) # doctest: +SKIP Form of the output with new additional css classes, >>> from pandas.io.formats.style import Styler >>> df = pd.DataFrame([[1]]) >>> css = pd.DataFrame([[\"other-class\"]]) >>> s = Styler(df, uuid=\"_\", cell_ids=False).set_td_classes(css) >>> s.hide(axis=0).to_html() # doctest: +SKIP '' '' ' ' ' 0' ' ' ' ' ' 1' ' ' ''", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style.py", + "ast_data": "FunctionDef name:set_td_classes arg:self arg:classes arguments arg arg If BoolOp Raise Call Assign Call For Call Call For Call If BoolOp Call Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "transform", + "source_code": "def transform(node, ctx):\n node = qual_names.resolve(node)\n node = CallTreeTransformer(ctx).visit(node)\n return node", + "docstring": "Transform function call to the compiled counterparts. Args: node: AST ctx: EntityContext Returns: A tuple (node, new_names): node: The transformed AST new_names: set(string), containing any newly-generated names", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\call_trees.py", + "ast_data": "FunctionDef name:transform arg:node arg:ctx arguments arg arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_ydata", + "source_code": "def get_ydata(self, orig=True):\n if orig:\n return self._yorig\n if self._invalidy:\n self.recache()\n return self._y", + "docstring": "Return the ydata. 
If *orig* is *True*, return the original data, else the processed data.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:get_ydata arg:self arg:orig arguments arg arg If Return return:yes If Call Return return:yes" + }, + { + "library": "pytorch", + "name": "git_config_guard", + "source_code": "@contextlib.contextmanager\ndef git_config_guard(repo: GitRepo) -> Generator[None, None, None]:\n user_email = repo._run_git('config', 'user.email')\n user_name = repo._run_git('config', 'user.name')\n try:\n yield\n finally:\n if user_email:\n repo._run_git('config', '--global', 'user.email', user_email)\n if user_name:\n repo._run_git('config', '--global', 'user.name', user_name)", + "docstring": "Restores user.name and user.email global properties after context is finished", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\tryrebase.py", + "ast_data": "FunctionDef name:git_config_guard arg:repo arguments arg Assign Call Assign Call Try If Call If Call" + }, + { + "library": "tensorflow", + "name": "scatter_div", + "source_code": "def scatter_div(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n return gen_state_ops.scatter_div(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", + "docstring": "Divide this variable by . Args: sparse_delta: to divide this variable by. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered division has completed. Raises: TypeError: if is not an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:scatter_div arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call" + }, + { + "library": "django", + "name": "savepoint_commit", + "source_code": "@async_unsafe\ndef savepoint_commit(self, sid):\n if not self._savepoint_allowed():\n return\n self.validate_thread_sharing()\n self._savepoint_commit(sid)", + "docstring": "Release a savepoint. Do nothing if savepoints are not supported.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:savepoint_commit arg:self arg:sid arguments arg arg If Call Return return:no Call Call" + }, + { + "library": "tensorflow", + "name": "__array__", + "source_code": "def __array__(self, dtype=None):\n return numpy_compat.np_asarray(self.numpy(), dtype=dtype)", + "docstring": "Allows direct conversion to a numpy array. >>> np.array(tf.Variable([1.0])) array([1.], dtype=float32) Returns: The variable value as a numpy array.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:__array__ arg:self arg:dtype arguments arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_can_fast_transpose", + "source_code": "@property\ndef _can_fast_transpose(self) -> bool:\n return False", + "docstring": "Is transposing an array with this dtype zero-copy? 
Only relevant for cases where _supports_2d is True.", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\base.py", + "ast_data": "FunctionDef name:_can_fast_transpose arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_newton_quadratic", + "source_code": "def _newton_quadratic(ab, fab, d, fd, k):\n a, b = ab\n fa, fb = fab\n _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd], forward=True, full=False)\n\n def _P(x):\n return (A * (x - b) + B) * (x - a) + fa\n if A == 0:\n r = a - fa / B\n else:\n r = a if np.sign(A) * np.sign(fa) > 0 else b\n for i in range(k):\n r1 = r - _P(r) / (B + A * (2 * r - a - b))\n if not ab[0] < r1 < ab[1]:\n if ab[0] < r < ab[1]:\n return r\n r = sum(ab) / 2.0\n break\n r = r1\n return r", + "docstring": "Apply Newton-Raphson like steps, using divided differences to approximate f' ab is a real interval [a, b] containing a root, fab holds the real values of f(a), f(b) d is a real number outside [a, b] k is the number of steps to apply", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_zeros_py.py", + "ast_data": "FunctionDef name:_newton_quadratic arg:ab arg:fab arg:d arg:fd arg:k arguments arg arg arg arg arg Assign Assign Assign Call FunctionDef name:_P arg:x arguments arg Return return:yes If Compare Assign Assign Compare Call Call For Call Assign Call If Compare If Compare Return return:yes Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "cleanup", + "source_code": "def cleanup(self):\n self.unregister_callback()", + "docstring": "Calls unregister_callback() to make sure to finalize outputs.", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\profiler.py", + "ast_data": "FunctionDef name:cleanup arg:self arguments arg Call" + }, + { + "library": "matplotlib", + "name": "_zalpha", + "source_code": "def _zalpha(colors, zs, min_alpha=0.3, _data_scale=None):\n if len(colors) == 0 or len(zs) == 0:\n return np.zeros((0, 4))\n min_alpha = np.clip(min_alpha, 0, 1)\n if _data_scale is None or _data_scale == 0:\n sats = np.ones_like(zs)\n else:\n sats = np.clip(1 - (zs - np.min(zs)) / _data_scale, min_alpha, 1)\n rgba = np.broadcast_to(mcolors.to_rgba_array(colors), (len(zs), 4))\n return np.column_stack([rgba[:, :3], rgba[:, 3] * sats])", + "docstring": "Modify the alpha values of the color list according to z-depth.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:_zalpha arg:colors arg:zs arg:min_alpha arg:_data_scale arguments arg arg arg arg If BoolOp Compare Call Compare Call Return return:yes Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_path", + "source_code": "def get_path(self):\n codes = [Path.MOVETO]\n codes.extend((Path.LINETO if edge in self._visible_edges else Path.MOVETO for edge in self._edges))\n if Path.MOVETO not in codes[1:]:\n codes[-1] = Path.CLOSEPOLY\n return Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]], codes, readonly=True)", + "docstring": "Return a for the .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:get_path arg:self arguments arg Assign Call Compare If Compare Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "cmd", + "source_code": "def cmd(expr: str, args: ParserElement) -> ParserElement:\n\n def names(elt: 
ParserElement) -> T.Generator[str, None, None]:\n if isinstance(elt, ParseExpression):\n for expr in elt.exprs:\n yield from names(expr)\n elif elt.resultsName:\n yield elt.resultsName\n csname = expr.split('{', 1)[0]\n err = csname + ''.join(('{%s}' % name for name in names(args))) if expr == csname else expr\n return csname - (args | Error(f'Expected {err}'))", + "docstring": "Helper to define TeX commands. `` where the names in the error message are taken from element names in *args*. If *expr* already includes arguments (e.g. \"\\cmd{arg}{...}\"), then they are stripped when constructing the parse element, but kept (and *expr* is used as is) in the error message.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", + "ast_data": "FunctionDef name:cmd arg:expr arg:args arguments arg arg FunctionDef name:names arg:elt arguments arg If Call For Call If Assign Call Assign Compare Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "read", + "source_code": "def read(self, index, name=None):\n return self._implementation.read(index, name=name)", + "docstring": "Read the value at location in the TensorArray. Args: index: 0-D. int32 tensor with the index to read from. name: A name for the operation (optional). Returns: The tensor at index .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:read arg:self arg:index arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "get_laplacian_kernel2d", + "source_code": "def get_laplacian_kernel2d(kernel_size: tuple[int, int] | int, *, device: Optional[Device]=None, dtype: Dtype=torch.float32) -> Tensor:\n ky, kx = _unpack_2d_ks(kernel_size)\n _check_kernel_size((ky, kx))\n kernel = torch.ones((ky, kx), device=device, dtype=dtype)\n mid_x = kx // 2\n mid_y = ky // 2\n kernel[mid_y, mid_x] = 1 - kernel.sum()\n return kernel", + "docstring": "Return Laplacian filter matrix coefficients. Args: kernel_size: filter size should be odd. device: tensor device desired to create the kernel dtype: tensor dtype desired to create the kernel Returns: 2D tensor with laplacian filter matrix coefficients. Shape: - Output: :math: Examples: >>> get_laplacian_kernel2d(3) tensor([[ 1., 1., 1.], [ 1., -8., 1.], [ 1., 1., 1.]]) >>> get_laplacian_kernel2d(5) tensor([[ 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1.], [ 1., 1., -24., 1., 1.], [ 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1.]])", + "type": "function", + "file_path": "kornia\\kornia\\filters\\kernels.py", + "ast_data": "FunctionDef name:get_laplacian_kernel2d arg:kernel_size arguments arg arg arg Assign Call Call Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "django", + "name": "__next__", + "source_code": "def __next__(self):\n if self._leftover:\n output = self._leftover\n self._leftover = b''\n else:\n output = next(self._producer)\n self._unget_history = []\n self.position += len(output)\n return output", + "docstring": "Used when the exact number of bytes to read is unimportant. Return whatever chunk is conveniently returned from the iterator. 
Useful to avoid unnecessary bookkeeping if performance is an issue.", + "type": "method", + "file_path": "django\\django\\http\\multipartparser.py", + "ast_data": "FunctionDef name:__next__ arg:self arguments arg If Assign Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "curl_to_request_kwargs", + "source_code": "def curl_to_request_kwargs(curl_command: str, ignore_unknown_options: bool=True) -> dict[str, Any]:\n curl_args = split(curl_command)\n if curl_args[0] != 'curl':\n raise ValueError('A curl command must start with \"curl\"')\n parsed_args, argv = curl_parser.parse_known_args(curl_args[1:])\n if argv:\n msg = f'Unrecognized options: {', '.join(argv)}'\n if ignore_unknown_options:\n warnings.warn(msg)\n else:\n raise ValueError(msg)\n url = parsed_args.url\n parsed_url = urlparse(url)\n if not parsed_url.scheme:\n url = 'http://' + url\n method = parsed_args.method or 'GET'\n result: dict[str, Any] = {'method': method.upper(), 'url': url}\n headers, cookies = _parse_headers_and_cookies(parsed_args)\n if headers:\n result['headers'] = headers\n if cookies:\n result['cookies'] = cookies\n if parsed_args.data:\n result['body'] = parsed_args.data\n if not parsed_args.method:\n result['method'] = 'POST'\n return result", + "docstring": "Convert a cURL command syntax to Request kwargs. :param str curl_command: string containing the curl command :param bool ignore_unknown_options: If true, only a warning is emitted when cURL options are unknown. Otherwise raises an error. (default: True) :return: dictionary of Request kwargs", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\curl.py", + "ast_data": "FunctionDef name:curl_to_request_kwargs arg:curl_command arg:ignore_unknown_options arguments arg arg Assign Call If Compare Raise Call Assign Call If Assign Call If Call Raise Call Assign Assign Call If Assign Assign BoolOp Call Assign Call If Assign If Assign If Assign If Assign Return return:yes" + }, + { + "library": "authlib", + "name": "check_verifier", + "source_code": "def check_verifier(self, verifier):\n raise NotImplementedError()", + "docstring": "A method to check if the given verifier matches this temporary credential. 
For instance, if this temporary credential has recorded the value in the database as column ``oauth_verifier``:: def check_verifier(self, verifier): return self.oauth_verifier == verifier :return: Boolean", + "type": "method", + "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py", + "ast_data": "FunctionDef name:check_verifier arg:self arg:verifier arguments arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "_get_curve_scorer", + "source_code": "def _get_curve_scorer(self):\n scoring = check_scoring(self.estimator, scoring=self.scoring)\n curve_scorer = _CurveScorer.from_scorer(scoring, self._get_response_method(), self.thresholds)\n return curve_scorer", + "docstring": "Get the curve scorer based on the objective metric used.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py", + "ast_data": "FunctionDef name:_get_curve_scorer arg:self arguments arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "Levy03", + "source_code": "class Levy03(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.custom_bounds = [(-5, 5), (-5, 5)]\n self.global_optimum = [[1 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n y = 1 + (x - 1) / 4\n v = sum((y[:-1] - 1) ** 2 * (1 + 10 * sin(pi * y[1:]) ** 2))\n z = (y[-1] - 1) ** 2\n return sin(pi * y[0]) ** 2 + v + z", + "docstring": "Levy 3 objective function. This class defines the Levy 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Levy03}}(\\mathbf{x}) = \\sin^2(\\pi y_1)+\\sum_{i=1}^{n-1}(y_i-1)^2[1+10\\sin^2(\\pi y_{i+1})]+(y_n-1)^2 Where, in this exercise: .. math:: y_i=1+\\frac{x_i-1}{4} Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for :math:`i = 1, ..., n` .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO: not clear what the Levy function definition is. Gavana, Mishra, Adorio have different forms. Indeed Levy 3 docstring from Gavana disagrees with the Gavana code! The following code is from the Mishra listing of Levy08.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_L.py", + "ast_data": "ClassDef name:Levy03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "forward_compatible", + "source_code": "@tf_export('compat.forward_compatible')\ndef forward_compatible(year, month, day):\n return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(year, month, day)", + "docstring": "Return true if the forward compatibility window has expired. See [Version compatibility]( Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The \"producer\" is typically a Python program that constructs and trains a model while the \"consumer\" is typically another program that loads and serves the model. 
TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation is created with the intent of replacing the implementation of an existing Python wrapper - . The Python wrapper implementation should change from something like: to: Where , , and specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an . month: A month (1 <= month <= 12) in year. Must be an . day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an . Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compat\\compat.py", + "ast_data": "FunctionDef name:forward_compatible arg:year arg:month arg:day arguments arg arg arg Return return:yes Compare Call Call" + }, + { + "library": "numpy", + "name": "endswith", + "source_code": "@set_module('numpy.strings')\ndef endswith(a, suffix, start=0, end=None):\n end = end if end is not None else MAX\n return _endswith_ufunc(a, suffix, start, end)", + "docstring": "Returns a boolean array which is where the string element in `False`, stop comparing at that position. Returns ------- out : ndarray Output array of bools See Also -------- str.endswith Examples -------- >>> import numpy as np >>> s = np.array(['foo', 'bar']) >>> s array(['foo', 'bar'], dtype='>> np.strings.endswith(s, 'ar') array([False, True]) >>> np.strings.endswith(s, 'a', start=1, end=2) array([False, True])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\strings.py", + "ast_data": "FunctionDef name:endswith arg:a arg:suffix arg:start arg:end arguments arg arg arg arg Assign Compare Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "numpy_dtype", + "source_code": "@property\ndef numpy_dtype(self) -> np.dtype:\n return self._dtype", + "docstring": "The NumPy dtype this NumpyEADtype wraps.", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:numpy_dtype arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_check_weights_parameters", + "source_code": "def _check_weights_parameters(self):\n if self.weight_concentration_prior is None:\n self.weight_concentration_prior_ = 1.0 / self.n_components\n else:\n self.weight_concentration_prior_ = self.weight_concentration_prior", + "docstring": "Check the parameter of the Dirichlet distribution.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py", + "ast_data": "FunctionDef name:_check_weights_parameters arg:self arguments arg If Compare Assign Assign" + }, + { + "library": "tensorflow", + "name": "scatter_nd_add", + "source_code": "def scatter_nd_add(self, indices, updates, name=None):\n return self._lazy_read(gen_state_ops.resource_scatter_nd_add(self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name))", + "docstring": "Applies sparse addition to individual values or slices in a Variable. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . 
The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to ref would look like this: [1, 13, 3, 14, 14, 6, 7, 20] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: The updated variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:scatter_nd_add arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "def step(self, context_id):\n dist_autograd._is_valid_context(context_id)\n optimizer_step_func = _script_local_optimizer_step if self.is_functional_optim else _local_optimizer_step\n rpc_futs = [rpc.rpc_async(optimizer.owner(), optimizer_step_func, args=(optimizer, context_id)) for optimizer in self.remote_optimizers]\n _wait_for_all(rpc_futs)", + "docstring": "Performs a single optimization step. This will call :meth: on each worker containing parameters to be optimized, and will block until all workers return. The provided `~torch.distributed.autograd.context` that contains the gradients that should be applied to the parameters. Args: context_id: the autograd context id for which we should run the optimizer step.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\optimizer.py", + "ast_data": "FunctionDef name:step arg:self arg:context_id arguments arg arg Call Assign Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "create", + "source_code": "@property\ndef create(self):\n if not self._in_graph_mode:\n raise RuntimeError('This operation is not supported when eager execution is enabled.')\n return self._initializer_op", + "docstring": "The op responsible for initializing this variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:create arg:self arguments arg If Raise Call Return return:yes" + }, + { + "library": "kornia", + "name": "image_list_to_tensor", + "source_code": "def image_list_to_tensor(images: List[Any]) -> Tensor:\n if not images:\n raise ValueError('Input list of numpy images is empty')\n if len(images[0].shape) != 3:\n raise ValueError('Input images must be three dimensional arrays')\n list_of_tensors: List[Tensor] = []\n for image in images:\n list_of_tensors.append(image_to_tensor(image))\n tensor: Tensor = torch.stack(list_of_tensors)\n return tensor", + "docstring": "Convert a list of numpy images to a PyTorch 4d tensor image. Args: images: list of images, each of the form :math:. Image shapes must be consistent Returns: tensor of the form :math:. 
Example: >>> imgs = [np.ones((4, 4, 1)), np.zeros((4, 4, 1))] >>> image_list_to_tensor(imgs).shape torch.Size([2, 1, 4, 4])", + "type": "function", + "file_path": "kornia\\kornia\\utils\\image.py", + "ast_data": "FunctionDef name:image_list_to_tensor arg:images arguments arg If Raise Call If Compare Call Raise Call For Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_recursive_apply", + "source_code": "def _recursive_apply(tensors, apply_fn):\n tensors_type = type(tensors)\n if isinstance(tensors, tensor_lib.Tensor):\n return apply_fn(tensors)\n elif isinstance(tensors, variables.Variable):\n return apply_fn(tensors.value())\n elif isinstance(tensors, (list, tuple)):\n tensors = [_recursive_apply(t, apply_fn) for t in tensors]\n if tensors_type is list:\n return list(tensors)\n elif tensors_type is tuple:\n return tuple(tensors)\n return tensors_type(*tensors)\n elif tensors_type is dict:\n return dict(((k, _recursive_apply(v, apply_fn)) for k, v in tensors.items()))\n else:\n raise TypeError(f'_recursive_apply argument {tensors!r} has invalid type {tensors_type!r}')", + "docstring": "Helper method to recursively apply a function to structure of tensors. The structure of the tensors should take the form similar to fetches in and includes single , , nested , , , or . Args: tensors: Single , , nested tuplenamedtupledictTensorTensorTypeError` if undefined type in the tensors structure.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py", + "ast_data": "FunctionDef name:_recursive_apply arg:tensors arg:apply_fn arguments arg arg Assign Call If Call Return return:yes Call If Call Return return:yes Call Call If Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call If Compare Return return:yes Call Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_TriangularSolve", + "source_code": "def _TriangularSolve(x, r):\n return _linalg.adjoint(linalg_ops.matrix_triangular_solve(r, _linalg.adjoint(x), lower=False, adjoint=False))", + "docstring": "Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_TriangularSolve arg:x arg:r arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "apply", + "source_code": "def apply(self, fn: Callable[[nn.Module], None]) -> 'FullyShardedDataParallel':\n uninitialized = self._is_root is None\n self._assert_state(TrainingState.IDLE)\n with _unshard_params_for_summon(self, self, writeback=True, rank0_only=False, offload_to_cpu=False, with_grads=False):\n ret = super().apply(fn)\n if uninitialized and self._is_root:\n for module in traversal_utils._get_fsdp_states(self):\n module._reset_lazy_init()\n return ret", + "docstring": "Apply `nn-init-docModule` -> None): function to be applied to each submodule Returns: Module: self", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:apply arg:self arg:fn arguments arg arg Assign Compare Call With Call Assign Call Call If BoolOp For Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "cpu_count", + "source_code": "def cpu_count(only_physical_cores=False):\n os_cpu_count = os.cpu_count() or 1\n if sys.platform == 'win32':\n os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)\n cpu_count_user = 
_cpu_count_user(os_cpu_count)\n aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)\n if not only_physical_cores:\n return aggregate_cpu_count\n if cpu_count_user < os_cpu_count:\n return max(cpu_count_user, 1)\n cpu_count_physical, exception = _count_physical_cores()\n if cpu_count_physical != 'not found':\n return cpu_count_physical\n if exception is not None:\n warnings.warn(f'Could not find the number of physical cores for the following reason:\\n{exception}\\nReturning the number of logical cores instead. You can silence this warning by setting LOKY_MAX_CPU_COUNT to the number of cores you want to use.', stacklevel=2)\n traceback.print_tb(exception.__traceback__)\n return aggregate_cpu_count", + "docstring": "Return the number of CPUs the current process can use. The returned number of CPUs accounts for: * the number of CPUs in the system, as given by `` is True, return the number of physical cores instead of the number of logical cores (hyperthreading / SMT). Note that this option is not enforced if the number of usable cores is controlled in any other way such as: process affinity, Cgroup restricted CPU bandwidth or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical cores is not found, return the number of logical cores. Note that on Windows, the returned number of CPUs cannot exceed 61, see: It is also always larger or equal to 1.", + "type": "function", + "file_path": "scipy\\dev.py", + "ast_data": "FunctionDef name:cpu_count arg:only_physical_cores arguments arg Assign BoolOp Call If Compare Assign Call Assign Call Assign Call Call If Return return:yes If Compare Return return:yes Call Assign Call If Compare Return return:yes If Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "convert_frame", + "source_code": "def convert_frame(compiler_fn: CompilerFn, hooks: Hooks) -> ConvertFrame:\n return ConvertFrame(compiler_fn, hooks)", + "docstring": "Try to convert a frame into an FX graph, if error leave frame unmodified", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\convert_frame.py", + "ast_data": "FunctionDef name:convert_frame arg:compiler_fn arg:hooks arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "conv_kernel_mask", + "source_code": "def conv_kernel_mask(input_shape, kernel_shape, strides, padding):\n if padding not in {'same', 'valid'}:\n raise NotImplementedError('Padding type %s not supported. Only \"valid\" and \"same\" are implemented.' % padding)\n in_dims = len(input_shape)\n if isinstance(kernel_shape, int):\n kernel_shape = (kernel_shape,) * in_dims\n if isinstance(strides, int):\n strides = (strides,) * in_dims\n kernel_dims = len(kernel_shape)\n stride_dims = len(strides)\n if kernel_dims != in_dims or stride_dims != in_dims:\n raise ValueError('Number of strides, input and kernel dimensions must all match. Received: %d, %d, %d.' 
% (stride_dims, in_dims, kernel_dims))\n output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)\n mask_shape = input_shape + output_shape\n mask = np.zeros(mask_shape, np.bool_)\n output_axes_ticks = [range(dim) for dim in output_shape]\n for output_position in itertools.product(*output_axes_ticks):\n input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape, output_position, strides, padding)\n for input_position in itertools.product(*input_axes_ticks):\n mask[input_position + output_position] = True\n return mask", + "docstring": "Compute a mask representing the connectivity of a convolution operation. Assume a convolution with given parameters is applied to an input having N spatial dimensions with to produce an output with shape . This method returns a boolean array of shape with entries indicating pairs of input and output locations that are connected by a weight. Example: >>> input_shape = (4,) >>> kernel_shape = (2,) >>> strides = (1,) >>> padding = \"valid\" >>> conv_kernel_mask(input_shape, kernel_shape, strides, padding) array([[ True, False, False], [ True, True, False], [False, True, True], [False, False, True]]) where rows and columns correspond to inputs and outputs respectively. Args: input_shape: tuple of size N: , spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string or . means no padding. results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. Returns: A boolean 2N-D of shape , where is the spatial shape of the output. entries in the mask represent pairs of input-output locations that are connected by a weight. Raises: ValueError: if , and don't have the same number of dimensions. NotImplementedError: if is not in {, }.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\conv_utils.py", + "ast_data": "FunctionDef name:conv_kernel_mask arg:input_shape arg:kernel_shape arg:strides arg:padding arguments arg arg arg arg If Compare Raise Call Assign Call If Call Assign If Call Assign Assign Call Assign Call If BoolOp Compare Compare Raise Call Assign Call Assign Assign Call Assign Call For Call Assign Call For Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "graph", + "source_code": "def graph(self):\n return self.__tx.output.graph", + "docstring": "Retrieve the partially constructed FX graph that would be passed to the user compiler after compilation.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\comptime.py", + "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "mark_non_differentiable", + "source_code": "def mark_non_differentiable(self, *args: torch.Tensor):\n self.non_differentiable = args", + "docstring": "Mark outputs as non-differentiable. This should be called at most once, in either the :func: or :func: methods, and all arguments should be tensor outputs. This will mark outputs as not requiring gradients, increasing the efficiency of backward computation. You still need to accept a gradient for each output in :meth:, but it's always going to be a zero tensor with the same shape as the shape of a corresponding output. This is used e.g. for indices returned from a sort. 
See example:: >>> class Func(Function): >>> @staticmethod >>> def forward(ctx, x): >>> sorted, idx = x.sort() >>> ctx.mark_non_differentiable(idx) >>> ctx.save_for_backward(x, idx) >>> return sorted, idx >>> >>> @staticmethod >>> @once_differentiable >>> def backward(ctx, g1, g2): # still need to accept g2 >>> x, idx = ctx.saved_tensors >>> grad_input = torch.zeros_like(x) >>> grad_input.index_add_(0, idx, g1) >>> return grad_input", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\function.py", + "ast_data": "FunctionDef name:mark_non_differentiable arg:self arguments arg arg Assign" + }, + { + "library": "numpy", + "name": "dot", + "source_code": "def dot(self, b, out=None, strict=False):\n return dot(self, b, out=out, strict=strict)", + "docstring": "a.dot(b, out=None) Masked dot product of two arrays. Note that and are located in different positions than in . In order to maintain compatibility with the functional version, it is recommended that the optional arguments be treated as keyword only. At some point that may be mandatory. Parameters ---------- b : masked_array_like Inputs array. out : masked_array, optional Output argument. This must have the exact kind that would be returned if it was not used. In particular, it must have the right type, must be C-contiguous, and its dtype must be the dtype that would be returned for . This is a performance feature. Therefore, if these conditions are not met, an exception is raised, instead of attempting to be flexible. strict : bool, optional Whether masked data are propagated (True) or set to 0 (False) for the computation. Default is False. Propagating the mask means that if a masked value appears in a row or column, the whole row or column is considered masked. See Also -------- numpy.ma.dot : equivalent function", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:dot arg:self arg:b arg:out arg:strict arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "bernoulli", + "source_code": "def bernoulli(n):\n if not isscalar(n) or n < 0:\n raise ValueError('n must be a non-negative integer.')\n n = int(n)\n if n < 2:\n n1 = 2\n else:\n n1 = n\n return _specfun.bernob(int(n1))[:n + 1]", + "docstring": "Bernoulli numbers B0..Bn (inclusive). Parameters ---------- n : int Indicated the number of terms in the Bernoulli series to generate. 
Returns ------- ndarray The Bernoulli numbers `bernoulli`.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:bernoulli arg:n arguments arg If BoolOp Call Compare Raise Call Assign Call If Compare Assign Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "masked", + "source_code": "@contextlib.contextmanager\ndef masked(self, mask):\n prior = self._load_mask\n if prior:\n mask = ops.and_(mask, prior)\n if isinstance(mask, OpsValue):\n mask = mask.value\n assert isinstance(mask, CppCSEVariable)\n mask.dtype = torch.bool\n self._load_mask = mask\n try:\n yield mask\n finally:\n self._load_mask = prior", + "docstring": "Context manager to add an additional mask to loads and stores.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py", + "ast_data": "FunctionDef name:masked arg:self arg:mask arguments arg arg Assign If Assign Call If Call Assign Call Assign Assign Try Assign" + }, + { + "library": "matplotlib", + "name": "PlaceHolderLayoutEngine", + "source_code": "class PlaceHolderLayoutEngine(LayoutEngine):\n\n def __init__(self, adjust_compatible, colorbar_gridspec, **kwargs):\n self._adjust_compatible = adjust_compatible\n self._colorbar_gridspec = colorbar_gridspec\n super().__init__(**kwargs)\n\n def execute(self, fig):\n return", + "docstring": "This layout engine does not adjust the figure layout at all. The purpose of this is to act as a placeholder when the user removes a layout engine to ensure an incompatible cannot be set later. Parameters ---------- adjust_compatible, colorbar_gridspec : bool Allow the PlaceHolderLayoutEngine to mirror the behavior of whatever layout engine it is replacing.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py", + "ast_data": "ClassDef name:PlaceHolderLayoutEngine FunctionDef name:__init__ arg:self arg:adjust_compatible arg:colorbar_gridspec arguments arg arg arg arg Assign Assign Call Call FunctionDef name:execute arg:self arg:fig arguments arg arg Return return:no" + }, + { + "library": "matplotlib", + "name": "get_joinstyle", + "source_code": "def get_joinstyle(self):\n return self._joinstyle.name", + "docstring": "Return the joinstyle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_joinstyle arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "toms748", + "source_code": "def toms748(f, a, b, args=(), k=1, xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True):\n if xtol <= 0:\n raise ValueError(f'xtol too small ({xtol:g} <= 0)')\n if rtol < _rtol / 4:\n raise ValueError(f'rtol too small ({rtol:g} < {_rtol / 4:g})')\n maxiter = operator.index(maxiter)\n if maxiter < 1:\n raise ValueError('maxiter must be greater than 0')\n if not np.isfinite(a):\n raise ValueError(f'a is not finite {a}')\n if not np.isfinite(b):\n raise ValueError(f'b is not finite {b}')\n if a >= b:\n raise ValueError(f'a and b are not an interval [{a}, {b}]')\n if not k >= 1:\n raise ValueError(f'k too small ({k} < 1)')\n if not isinstance(args, tuple):\n args = (args,)\n f = _wrap_nan_raise(f)\n solver = TOMS748Solver()\n result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp)\n x, function_calls, iterations, flag = result\n return _results_select(full_output, (x, function_calls, iterations, flag), 'toms748')", + "docstring": "Find a root using TOMS Algorithm 748 method. 
Implements the Algorithm 748 method of Alefeld, Potro and Shi to find a root of the function on the interval `f(b)ff(a)f(b)ffmaxiterfull_outputfull_outputxrRootResultsRootResultsfRootResultsffk`abs(x - x0) >> def f(x): ... return (x**3 - 1) # only one real root at x = 1 >>> from scipy import optimize >>> root, results = optimize.toms748(f, 0, 2, full_output=True) >>> root 1.0 >>> results converged: True flag: converged function_calls: 11 iterations: 5 root: 1.0 method: toms748", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_zeros_py.py", + "ast_data": "FunctionDef name:toms748 arg:f arg:a arg:b arg:args arg:k arg:xtol arg:rtol arg:maxiter arg:full_output arg:disp arguments arg arg arg arg arg arg arg arg arg arg If Compare Raise Call If Compare Raise Call Assign Call If Compare Raise Call If Call Raise Call If Call Raise Call If Compare Raise Call If Compare Raise Call If Call Assign Assign Call Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "numpy", + "name": "character_backward_compatibility_hook", + "source_code": "def character_backward_compatibility_hook(item, parents, result, *args, **kwargs):\n parent_key, parent_value = parents[-1]\n key, value = item\n\n def fix_usage(varname, value):\n value = re.sub('[*]\\\\s*\\\\b' + varname + '\\\\b', varname, value)\n value = re.sub('\\\\b' + varname + '\\\\b\\\\s*[\\\\[]\\\\s*0\\\\s*[\\\\]]', varname, value)\n return value\n if parent_key in ['dimension', 'check']:\n assert parents[-3][0] == 'vars'\n vars_dict = parents[-3][1]\n elif key == '=':\n assert parents[-2][0] == 'vars'\n vars_dict = parents[-2][1]\n else:\n vars_dict = None\n new_value = None\n if vars_dict is not None:\n new_value = value\n for varname, vd in vars_dict.items():\n if ischaracter(vd):\n new_value = fix_usage(varname, new_value)\n elif key == 'callstatement':\n vars_dict = parents[-2][1]['vars']\n new_value = value\n for varname, vd in vars_dict.items():\n if ischaracter(vd):\n new_value = re.sub('(? `{new_value}`\\n', 1)\n return (key, new_value)", + "docstring": "Previously, Fortran character was incorrectly treated as character*1. This hook fixes the usage of the corresponding variables in , , , and expressions. The usage of in expression can be left unchanged because C is C typedef of , although, new implementations should use in the corresponding expressions. 
See for more information.", + "type": "function", + "file_path": "numpy\\numpy\\f2py\\crackfortran.py", + "ast_data": "FunctionDef name:character_backward_compatibility_hook arg:item arg:parents arg:result arguments arg arg arg arg arg Assign Assign FunctionDef name:fix_usage arg:varname arg:value arguments arg arg Assign Call Assign Call Return return:yes If Compare Compare Assign If Compare Compare Assign Assign Assign If Compare Assign For Call If Call Assign Call If Compare Assign Assign For Call If Call Assign Call If Compare If Compare Call Return return:yes" + }, + { + "library": "scipy", + "name": "_aberth", + "source_code": "def _aberth(f, fp, x0, tol=1e-15, maxiter=50):\n N = len(x0)\n x = array(x0, complex)\n beta = np.empty_like(x0)\n for iteration in range(maxiter):\n alpha = -f(x) / fp(x)\n for k in range(N):\n beta[k] = np.sum(1 / (x[k] - x[k + 1:]))\n beta[k] += np.sum(1 / (x[k] - x[:k]))\n x += alpha / (1 + alpha * beta)\n if not all(np.isfinite(x)):\n raise RuntimeError('Root-finding calculation failed')\n if all(abs(alpha) <= tol):\n break\n else:\n raise Exception('Zeros failed to converge')\n return x", + "docstring": "Given a function , its first derivative , and a set of initial guesses , simultaneously find the roots of the polynomial using the Aberth-Ehrlich method. `f`. (This is not a complete implementation of Bini's algorithm.)", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_filter_design.py", + "ast_data": "FunctionDef name:_aberth arg:f arg:fp arg:x0 arg:tol arg:maxiter arguments arg arg arg arg arg Assign Call Assign Call Assign Call For Call Assign Call Call For Call Assign Call Call If Call Call Raise Call If Call Compare Call Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "log_prob", + "source_code": "def log_prob(self, input: Tensor) -> Tensor:\n head_output = self.head(input)\n return self._get_full_log_prob(input, head_output)", + "docstring": "Compute log probabilities for all :math:. Args: input (Tensor): a minibatch of examples Returns: log-probabilities of for each class :math: in range :math:, where :math: is a parameter passed to `(N, \\texttt{in\\_features})(N, \\texttt{n\\_classes})`", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\adaptive.py", + "ast_data": "FunctionDef name:log_prob arg:self arg:input arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_ica_par", + "source_code": "def _ica_par(X, tol, g, fun_args, max_iter, w_init):\n W = _sym_decorrelation(w_init)\n del w_init\n p_ = float(X.shape[1])\n for ii in range(max_iter):\n gwtx, g_wtx = g(np.dot(W, X), fun_args)\n W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W)\n del gwtx, g_wtx\n lim = max(abs(abs(np.einsum('ij,ij->i', W1, W)) - 1))\n W = W1\n if lim < tol:\n break\n else:\n warnings.warn('FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.', ConvergenceWarning)\n return (W, ii + 1)", + "docstring": "Parallel FastICA. 
Used internally by FastICA --main loop", + "type": "function", + "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py", + "ast_data": "FunctionDef name:_ica_par arg:X arg:tol arg:g arg:fun_args arg:max_iter arg:w_init arguments arg arg arg arg arg arg Assign Call Assign Call For Call Assign Call Call Assign Call Call Assign Call Call Call Call Assign If Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "relu", + "source_code": "@dispatch.add_dispatch_support\ndef relu(x, alpha=0.0, max_value=None, threshold=0):\n return backend.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)", + "docstring": "Applies the rectified linear unit activation function. With default values, this returns the standard ReLU activation: , the element-wise maximum of 0 and the input tensor. Modifying default parameters allows you to use non-zero thresholds, change the max value of the activation, and to use a non-zero multiple of the input for values below the threshold. For example: >>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32) >>> tf.keras.activations.relu(foo).numpy() array([ 0., 0., 0., 5., 10.], dtype=float32) >>> tf.keras.activations.relu(foo, alpha=0.5).numpy() array([-5. , -2.5, 0. , 5. , 10. ], dtype=float32) >>> tf.keras.activations.relu(foo, max_value=5).numpy() array([0., 0., 0., 5., 5.], dtype=float32) >>> tf.keras.activations.relu(foo, threshold=5).numpy() array([-0., -0., 0., 0., 10.], dtype=float32) Args: x: Input or . alpha: A that governs the slope for values lower than the threshold. max_value: A that sets the saturation threshold (the largest value the function will return). threshold: A giving the threshold value of the activation function below which values will be damped or set to zero. Returns: A representing the input tensor, transformed by the relu activation function. Tensor will be of the same shape and dtype of input .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py", + "ast_data": "FunctionDef name:relu arg:x arg:alpha arg:max_value arg:threshold arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "LinAlgError", + "source_code": "@set_module('numpy.linalg')\nclass LinAlgError(ValueError):\n pass", + "docstring": "Generic Python-exception-derived object raised by linalg functions. General purpose exception class, derived from Python's ValueError class, programmatically raised in linalg functions when a Linear Algebra-related condition would prevent further correct execution of the function. 
Parameters ---------- None Examples -------- >>> from numpy import linalg as LA >>> LA.inv(np.zeros((2,2))) Traceback (most recent call last): File \"\", line 1, in File \"...linalg.py\", line 350, in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) File \"...linalg.py\", line 249, in solve raise LinAlgError('Singular matrix') numpy.linalg.LinAlgError: Singular matrix", + "type": "class", + "file_path": "numpy\\numpy\\linalg\\_linalg.py", + "ast_data": "ClassDef name:LinAlgError Call" + }, + { + "library": "tensorflow", + "name": "_parse_input_meta_graph_proto", + "source_code": "def _parse_input_meta_graph_proto(input_graph: str, input_binary: bool) -> meta_graph_pb2.MetaGraphDef:\n if not gfile.Exists(input_graph):\n raise IOError(\"Input meta graph file '\" + input_graph + \"' does not exist!\")\n input_meta_graph_def = meta_graph_pb2.MetaGraphDef()\n mode = 'rb' if input_binary else 'r'\n with gfile.GFile(input_graph, mode) as f:\n if input_binary:\n input_meta_graph_def.ParseFromString(f.read())\n else:\n text_format.Merge(f.read(), input_meta_graph_def)\n print(\"Loaded meta graph file '\" + input_graph)\n return input_meta_graph_def", + "docstring": "Parses input tensorflow graph into MetaGraphDef proto.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\freeze_graph.py", + "ast_data": "FunctionDef name:_parse_input_meta_graph_proto arg:input_graph arg:input_binary arguments arg arg If Call Raise Call Assign Call Assign With Call If Call Call Call Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "_copy_except__document", + "source_code": "def _copy_except__document(el: Element) -> Element:\n newnode = object.__new__(el.__class__)\n newnode.children = []\n newnode.rawsource = el.rawsource\n newnode.tagname = el.tagname\n newnode.attributes = {k: v if k not in {'ids', 'classes', 'names', 'dupnames', 'backrefs'} else v[:] for k, v in el.attributes.items()}\n newnode.line = el.line\n newnode.source = el.source\n return newnode", + "docstring": "Monkey-patch to not copy the `` attribute. See:", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\nodes.py", + "ast_data": "FunctionDef name:_copy_except__document arg:el arguments arg Assign Call Assign Assign Assign Assign Compare Call Assign Assign Return return:yes" + }, + { + "library": "numpy", + "name": "cumulative_prod", + "source_code": "@array_function_dispatch(_cumulative_prod_dispatcher)\ndef cumulative_prod(x, /, *, axis=None, dtype=None, out=None, include_initial=False):\n return _cumulative_func(x, um.multiply, axis, dtype, out, include_initial)", + "docstring": "Return the cumulative product of elements along a given axis. This function is an Array API compatible alternative to . Parameters ---------- x : array_like Input array. axis : int, optional Axis along which the cumulative product is computed. The default (None) is only allowed for one-dimensional arrays. 
For arrays with more than one dimension `ufuncs-output-type`: >>> np.cumulative_prod(b, axis=1) array([[ 1, 2, 6], [ 4, 20, 120]])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\fromnumeric.py", + "ast_data": "FunctionDef name:cumulative_prod arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_create_dataset_reader", + "source_code": "def _create_dataset_reader(dataset_creator, filenames, num_parallel_reads=None, name=None):\n\n def read_one_file(filename):\n filename = ops.convert_to_tensor(filename, dtypes.string, name='filename')\n return dataset_creator(filename)\n if num_parallel_reads is None:\n return filenames.flat_map(read_one_file, name=name)\n elif num_parallel_reads == dataset_ops.AUTOTUNE:\n return filenames.interleave(read_one_file, num_parallel_calls=num_parallel_reads, name=name)\n else:\n return ParallelInterleaveDataset(filenames, read_one_file, cycle_length=num_parallel_reads, block_length=1, sloppy=False, buffer_output_elements=None, prefetch_input_elements=None, name=name)", + "docstring": "Creates a dataset that reads the given files using the given reader. Args: dataset_creator: A function that takes in a single file name and returns a dataset. filenames: A containing one or more filenames. num_parallel_reads: The number of parallel reads we should do. name: (Optional.) A name for the tf.data operation. Returns: A that reads data from .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py", + "ast_data": "FunctionDef name:_create_dataset_reader arg:dataset_creator arg:filenames arg:num_parallel_reads arg:name arguments arg arg arg arg FunctionDef name:read_one_file arg:filename arguments arg Assign Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "test", + "source_code": "@click.option('-m', 'markexpr', metavar='MARKEXPR', default=default, help='Run tests with the given markers')\n@spin.util.extend_command(spin.cmds.meson.test)\ndef test(*, parent_callback, pytest_args, tests, markexpr, **kwargs):\n if not pytest_args and (not tests):\n pytest_args = ('--pyargs', 'numpy')\n if '-m' not in pytest_args:\n if markexpr != 'full':\n pytest_args = ('-m', markexpr) + pytest_args\n kwargs['pytest_args'] = pytest_args\n parent_callback(**{'pytest_args': pytest_args, 'tests': tests, **kwargs})", + "docstring": "By default, spin will run . To run the full test suite, use", + "type": "function", + "file_path": "numpy\\.spin\\cmds.py", + "ast_data": "FunctionDef name:test arguments arg arg arg arg arg If BoolOp Assign If Compare If Compare Assign Assign Call Call Call" + }, + { + "library": "django", + "name": "get_meta", + "source_code": "def get_meta(self):\n if self.model:\n return self.model._meta", + "docstring": "Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:get_meta arg:self arguments arg If Return return:yes" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, path, parent, children):\n if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:\n raise RuntimeError('Modules nested too deep:\\n%s.%s\\n\\nThis is likely a problem with an accidental public import.' 
% (self._root_name, path))\n full_path = '.'.join([self._root_name, path]) if path else self._root_name\n for name, child in list(children):\n if self._is_private(full_path, name, child):\n children.remove((name, child))\n self._visitor(path, parent, children)\n for name, child in list(children):\n if self._do_not_descend(full_path, name):\n children.remove((name, child))", + "docstring": "Visitor interface, see for details.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\common\\public_api.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:path arg:parent arg:children arguments arg arg arg arg If BoolOp Call Compare Call Call Raise Call Assign Call For Call If Call Call Call For Call If Call Call" + }, + { + "library": "tensorflow", + "name": "TestOneInput", + "source_code": "def TestOneInput(data):\n fh = FuzzingHelper(data)\n input_tensor = fh.get_random_numeric_tensor()\n _ = tf.raw_ops.Acos(x=input_tensor)", + "docstring": "Test randomized fuzzing input for tf.raw_ops.Acos.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\acos_fuzz.py", + "ast_data": "FunctionDef name:TestOneInput arg:data arguments arg Assign Call Assign Call Assign Call" + }, + { + "library": "django", + "name": "update_catalogs", + "source_code": "def update_catalogs(resources=None, languages=None, verbosity=0):\n settings.configure()\n django.setup()\n if resources is not None:\n print('`update_catalogs` will always process all resources.')\n contrib_dirs = _get_locale_dirs(None, include_core=False)\n os.chdir(os.path.join(os.getcwd(), 'django'))\n print('Updating en catalogs for Django and contrib apps...')\n call_command('makemessages', locale=['en'], verbosity=verbosity)\n print('Updating en JS catalogs for Django and contrib apps...')\n call_command('makemessages', locale=['en'], domain='djangojs', verbosity=verbosity)\n _check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))\n for name, dir_ in contrib_dirs:\n _check_diff(name, dir_)", + "docstring": "Update the en/LC_MESSAGES/django.po (main and contrib) files with new/updated translatable strings.", + "type": "function", + "file_path": "django\\scripts\\manage_translations.py", + "ast_data": "FunctionDef name:update_catalogs arg:resources arg:languages arg:verbosity arguments arg arg arg Call Call If Compare Call Assign Call Call Call Call Call Call Call Call Call Call Call For Call" + }, + { + "library": "matplotlib", + "name": "grid", + "source_code": "def grid(self, visible=None, which='major', axis='both', **kwargs):\n super().grid(visible, which=which, axis=axis, **kwargs)\n if not self._axisline_on:\n return\n if visible is None:\n visible = self.axes.xaxis._minor_tick_kw['gridOn'] or self.axes.xaxis._major_tick_kw['gridOn'] or self.axes.yaxis._minor_tick_kw['gridOn'] or self.axes.yaxis._major_tick_kw['gridOn']\n self.gridlines.set(which=which, axis=axis, visible=visible)\n self.gridlines.set(**kwargs)", + "docstring": "Toggle the gridlines, and optionally set the properties of the lines.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py", + "ast_data": "FunctionDef name:grid arg:self arg:visible arg:which arg:axis arguments arg arg arg arg arg Call Call If Return return:no If Compare Assign BoolOp Call Call" + }, + { + "library": "pytorch", + "name": "quantize_per_tensor_tensor2", + "source_code": "@impl(quantized_decomposed_lib, 'quantize_per_tensor.tensor2', 'CompositeExplicitAutograd')\ndef quantize_per_tensor_tensor2(input: 
torch.Tensor, scale: torch.Tensor, zero_point: torch.Tensor, quant_min: torch.Tensor, quant_max: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:\n assert zero_point.numel() == 1, f'Expecting zero_point tensor to be one element, but received : {zero_point.numel()}'\n assert scale.numel() == 1, f'Expecting scale tensor to be one element, but received : {scale.numel()}'\n return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype)", + "docstring": "Affine quantization for the Tensor using the same quantization parameters to map from floating point to quantized values Same as but scale and zero_point are Scalar Tensor instead of scalar values", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py", + "ast_data": "FunctionDef name:quantize_per_tensor_tensor2 arg:input arg:scale arg:zero_point arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg Compare Call Call Compare Call Call Return return:yes Call Call Call Call Call Call" + }, + { + "library": "scrapy", + "name": "handle_spider_output_async", + "source_code": "async def handle_spider_output_async(self, result: Iterable[_T] | AsyncIterator[_T], request: Request, response: Response) -> None:\n if isinstance(result, AsyncIterator):\n ait = aiter_errback(result, self.handle_spider_error, request, response)\n await maybe_deferred_to_future(parallel_async(ait, self.concurrent_items, self._process_spidermw_output, response))\n return\n it = iter_errback(result, self.handle_spider_error, request, response)\n await maybe_deferred_to_future(parallel(it, self.concurrent_items, self._process_spidermw_output, response))", + "docstring": "Pass items/requests produced by a callback to `` in parallel.", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\scraper.py", + "ast_data": "AsyncFunctionDef name:handle_spider_output_async arg:self arg:result arg:request arg:response arguments arg arg arg arg If Call Assign Call Call Call Return return:no Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "UniformUnitScaling", + "source_code": "@tf_export(v1=['initializers.uniform_unit_scaling', 'uniform_unit_scaling_initializer'])\n@deprecation.deprecated_endpoints('uniform_unit_scaling_initializer', 'initializers.uniform_unit_scaling')\nclass UniformUnitScaling(Initializer):\n\n @deprecated_args(None, 'Call initializer instance with the dtype argument instead of passing it to the constructor', 'dtype')\n @deprecated(None, 'Use tf.initializers.variance_scaling instead with distribution=uniform to get equivalent behavior.')\n def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):\n self.factor = factor\n self.seed = seed\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n scale_shape = shape\n if partition_info is not None:\n scale_shape = partition_info.full_shape\n input_size = 1.0\n for dim in scale_shape[:-1]:\n input_size *= float(dim)\n input_size = max(input_size, 1.0)\n max_val = math.sqrt(3 / input_size) * self.factor\n return random_ops.random_uniform(shape, -max_val, max_val, dtype, seed=self.seed)\n\n def get_config(self):\n return {'factor': self.factor, 'seed': self.seed, 'dtype': self.dtype.name}", + "docstring": "Initializer that generates tensors without scaling variance. 
When initializing a deep network, it is in principle advantageous to keep the scale of the input variance constant, so it does not explode or diminish by reaching the final layer. If the input is and the operation , and we want to initialize uniformly at random, we need to pick from [-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)] to keep the scale intact, where (the size of the input). A similar calculation for convolutional networks gives an analogous result with equal to the product of the first 3 dimensions. When nonlinearities are present, we need to multiply this by a constant . See (Sussillo et al., 2014) for deeper motivation, experiments and the calculation of constants. In section 2.3 there, the constants were numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15. Args: factor: Float. A multiplicative factor by which the values will be scaled. seed: A Python integer. Used to create random seeds. See for behavior. dtype: Default data type, used if no argument is provided when calling the initializer. Only floating point types are supported. References: [Sussillo et al., 2014]( ([pdf](", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "ClassDef name:UniformUnitScaling FunctionDef name:__init__ arg:self arg:factor arg:seed arg:dtype arguments arg arg arg arg Assign Assign Assign Call Call Call Call FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:partition_info arguments arg arg arg arg If Compare Assign Assign If Compare Assign Assign For Call Assign Call Assign Call Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pygame", + "name": "encode", + "source_code": "def encode(pos, b_box):\n return (pos[0] < b_box.left) * LEFT_EDGE + (pos[0] > b_box.right) * RIGHT_EDGE + (pos[1] < b_box.top) * TOP_EDGE + (pos[1] > b_box.bottom) * BOTTOM_EDGE", + "docstring": "returns a code that defines position with respect to a bounding box", + "type": "function", + "file_path": "pygame\\src_py\\draw_py.py", + "ast_data": "FunctionDef name:encode arg:pos arg:b_box arguments arg arg Return return:yes Compare Compare Compare Compare" + }, + { + "library": "kornia", + "name": "so3", + "source_code": "@property\ndef so3(self) -> So3:\n return self._rotation", + "docstring": "Return the underlying rotation(So3).", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", + "ast_data": "FunctionDef name:so3 arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "_build_num_ray_dict_of_points2d", + "source_code": "@staticmethod\ndef _build_num_ray_dict_of_points2d(points2d_as_flat_tensors: Dict[int, Points2D_FlatTensors]) -> Dict[int, Points2D]:\n num_ray_dict_of_points2d: Dict[int, RaySampler.Points2D] = {}\n for n, points2d_as_flat_tensor in points2d_as_flat_tensors.items():\n num_cams = len(points2d_as_flat_tensor._camera_ids)\n points_2d = torch.stack((points2d_as_flat_tensor._x, points2d_as_flat_tensor._y)).permute(1, 0).reshape(num_cams, -1, 2)\n num_ray_dict_of_points2d[n] = RaySampler.Points2D(points_2d, points2d_as_flat_tensor._camera_ids)\n return num_ray_dict_of_points2d", + "docstring": "Build a dictionary of ray pixel points, by total number of rays as key. The dictionary groups rays by the total amount of rays, which allows the case of casting different number of rays from each scene camera. 
Args: points2d_as_flat_tensors: dictionary of pixel coordinates grouped by total number of rays: Dict[int, Points2D_FlatTensors] Returns: dictionary of Points2D objects that holds information on pixel 2d coordinates of each ray and the camera id it was casted by: Dict[int, Points2D]", + "type": "method", + "file_path": "kornia\\kornia\\nerf\\samplers.py", + "ast_data": "FunctionDef name:_build_num_ray_dict_of_points2d arg:points2d_as_flat_tensors arguments arg For Call Assign Call Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_pop", + "source_code": "def _pop(self, key, indices=None, name=None):\n if name is None:\n name = '%s_get' % self._name\n indices, dtypes = self._get_indices_and_dtypes(indices)\n with ops.colocate_with(self._coloc_op):\n result = self._pop_fn(key, shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n return (key, self._get_return_value(result, indices))", + "docstring": "Remove and return the associated (key, value) is returned from the staging area. If the key is not in the staging area, this method will block until the associated (key, value) is inserted. Args: key: Key associated with the required data indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. name: A name for the operation (optional) Returns: The created op", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:_pop arg:self arg:key arg:indices arg:name arguments arg arg arg arg If Compare Assign Assign Call With Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "unregister_layer", + "source_code": "def unregister_layer(self, name):\n self.data_groups[name]['hook'].remove()\n self.state.pop(name)\n self.data_groups.pop(name)", + "docstring": "Detaches the sparsifier from the layer", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", + "ast_data": "FunctionDef name:unregister_layer arg:self arg:name arguments arg arg Call Call Call" + }, + { + "library": "matplotlib", + "name": "intervaly", + "source_code": "@property\ndef intervaly(self):\n return self.get_points()[:, 1]", + "docstring": "The pair of *y* coordinates that define the bounding box. 
This is not guaranteed to be sorted from bottom to top.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:intervaly arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_values", + "source_code": "@property\ndef _values(self):\n raise NotImplementedError('Abstract method')", + "docstring": "An iterable/sequence which may contain trackable objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py", + "ast_data": "FunctionDef name:_values arg:self arguments arg Raise Call" + }, + { + "library": "scipy", + "name": "_pomeranz_compute_j1j2", + "source_code": "def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf):\n if i == 0:\n j1, j2 = (-ll - ceilf - 1, ll + ceilf - 1)\n else:\n ip1div2, ip1mod2 = divmod(i + 1, 2)\n if ip1mod2 == 0:\n if ip1div2 == n + 1:\n j1, j2 = (n - ll - ceilf - 1, n + ll + ceilf - 1)\n else:\n j1, j2 = (ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1)\n else:\n j1, j2 = (ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1)\n return (max(j1 + 2, 0), min(j2, n))", + "docstring": "Compute the endpoints of the interval for row i.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_ksstats.py", + "ast_data": "FunctionDef name:_pomeranz_compute_j1j2 arg:i arg:n arg:ll arg:ceilf arg:roundf arguments arg arg arg arg arg If Compare Assign Assign Call If Compare If Compare Assign Assign Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "ThreadingOptions", + "source_code": "@deprecation.deprecated_endpoints('data.experimental.ThreadingOptions')\n@tf_export('data.experimental.ThreadingOptions', 'data.ThreadingOptions')\nclass ThreadingOptions(options_lib.OptionsBase):\n max_intra_op_parallelism = options_lib.create_option(name='max_intra_op_parallelism', ty=int, docstring='If set, it overrides the maximum degree of intra-op parallelism.')\n private_threadpool_size = options_lib.create_option(name='private_threadpool_size', ty=int, docstring='If set, the dataset will use a private threadpool of the given size. The value 0 can be used to indicate that the threadpool size should be determined at runtime based on the number of available CPU cores.')\n\n def _to_proto(self):\n pb = dataset_options_pb2.ThreadingOptions()\n if self.max_intra_op_parallelism is not None:\n pb.max_intra_op_parallelism = self.max_intra_op_parallelism\n if self.private_threadpool_size is not None:\n pb.private_threadpool_size = self.private_threadpool_size\n return pb\n\n def _from_proto(self, pb):\n if pb.WhichOneof('optional_max_intra_op_parallelism') is not None:\n self.max_intra_op_parallelism = pb.max_intra_op_parallelism\n if pb.WhichOneof('optional_private_threadpool_size') is not None:\n self.private_threadpool_size = pb.private_threadpool_size", + "docstring": "Represents options for dataset threading. 
You can set the threading options of a dataset through the property of ; the property is an instance of .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py", + "ast_data": "ClassDef name:ThreadingOptions Assign Call Assign Call FunctionDef name:_to_proto arg:self arguments arg Assign Call If Compare Assign If Compare Assign Return return:yes FunctionDef name:_from_proto arg:self arg:pb arguments arg arg If Compare Call Assign If Compare Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "get_unique_name_wrt", + "source_code": "def get_unique_name_wrt(prefix: str, *containers, requires_suffix=False) -> str:\n if not requires_suffix and (not is_in(prefix, *containers)):\n return prefix\n for i in itertools.count():\n candidate = f'{prefix}_{i}'\n if not is_in(candidate, *containers):\n return candidate\n raise AssertionError('unreachable')", + "docstring": "Return a name that starts with and is not in any of the (e.g., map, set).", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:get_unique_name_wrt arg:prefix arguments arg arg arg If BoolOp Call Return return:yes For Call Assign If Call Return return:yes Raise Call" + }, + { + "library": "django", + "name": "attr_value", + "source_code": "def attr_value(self, target, index=0):\n if not isinstance(target, str) or not isinstance(index, int):\n raise TypeError\n return capi.get_attr_value(self.ptr, force_bytes(target), index)", + "docstring": "The attribute value for the given target node (e.g. 'PROJCS'). The index keyword specifies an index of the child node to return.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", + "ast_data": "FunctionDef name:attr_value arg:self arg:target arg:index arguments arg arg arg If BoolOp Call Call Raise Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_minimum_flops", + "source_code": "@ops.RegisterStatistics('Minimum', 'flops')\ndef _minimum_flops(graph, node):\n return _binary_per_element_op_flops(graph, node)", + "docstring": "Compute flops for Minimum operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_minimum_flops arg:graph arg:node arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "GuardsCheckpointState", + "source_code": "class GuardsCheckpointState:\n dynamo_guards: set[Guard] = set()\n\n def __init__(self, dynamo_guards):\n self.dynamo_guards = dynamo_guards\n\n def diff(self, other):\n r = self.dynamo_guards.difference(other.dynamo_guards)\n if len(r) == 0:\n return None\n return r\n\n def __eq__(self, other):\n return self.diff(other) is None", + "docstring": "The GuardCheckpointState - it is the T of Checkpointable[T] for GuardsContext", + "type": "class", + "file_path": "pytorch\\torch\\_guards.py", + "ast_data": "ClassDef name:GuardsCheckpointState Call FunctionDef name:__init__ arg:self arg:dynamo_guards arguments arg arg Assign FunctionDef name:diff arg:self arg:other arguments arg arg Assign Call If Compare Call Return return:no Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare Call" + }, + { + "library": "django", + "name": "default_key_func", + "source_code": "def default_key_func(key, key_prefix, version):\n return '%s:%s:%s' % (key_prefix, version, key)", + "docstring": "Default function to generate keys. 
Construct the key used by all other methods. By default, prepend the . KEY_FUNCTION can be used to specify an alternate function with custom key making behavior.", + "type": "function", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:default_key_func arg:key arg:key_prefix arg:version arguments arg arg arg Return return:yes" + }, + { + "library": "django", + "name": "render", + "source_code": "def render(self, context):\n template = self.template.resolve(context)\n if not callable(getattr(template, 'render', None)):\n template_name = template or ()\n if isinstance(template_name, str):\n template_name = (construct_relative_path(self.origin.template_name, template_name),)\n else:\n template_name = tuple(template_name)\n cache = context.render_context.dicts[0].setdefault(self, {})\n template = cache.get(template_name)\n if template is None:\n template = context.template.engine.select_template(template_name)\n cache[template_name] = template\n elif hasattr(template, 'template'):\n template = template.template\n values = {name: var.resolve(context) for name, var in self.extra_context.items()}\n if self.isolated_context:\n return template.render(context.new(values))\n with context.push(**values):\n return template.render(context)", + "docstring": "Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop.", + "type": "method", + "file_path": "django\\django\\template\\loader_tags.py", + "ast_data": "FunctionDef name:render arg:self arg:context arguments arg arg Assign Call If Call Call Assign BoolOp If Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign If Call Assign Assign Call Call If Return return:yes Call Call With Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_add_kernel_input", + "source_code": "def _add_kernel_input(self, name: str):\n return self.kernel.args.input(name)", + "docstring": "Add name as input to kernel and return input ref.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", + "ast_data": "FunctionDef name:_add_kernel_input arg:self arg:name arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_ttest_finish", + "source_code": "def _ttest_finish(df, t, alternative):\n if alternative == 'less':\n pval = special._ufuncs.stdtr(df, t)\n elif alternative == 'greater':\n pval = special._ufuncs.stdtr(df, -t)\n elif alternative == 'two-sided':\n pval = special._ufuncs.stdtr(df, -np.abs(t)) * 2\n else:\n raise ValueError(\"alternative must be 'less', 'greater' or 'two-sided'\")\n if t.ndim == 0:\n t = t[()]\n if pval.ndim == 0:\n pval = pval[()]\n return (t, pval)", + "docstring": "Common code between all 3 t-test functions.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:_ttest_finish arg:df arg:t arg:alternative arguments arg arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call Call Raise Call If Compare Assign If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "Zeros", + "source_code": "@tf_export(v1=['initializers.zeros', 'zeros_initializer'])\n@deprecation.deprecated_endpoints('initializers.zeros')\nclass Zeros(Initializer):\n\n @deprecated_args(None, 'Call initializer instance with the dtype argument instead of passing it to the constructor', 'dtype')\n def __init__(self, dtype=dtypes.float32):\n self.dtype 
= dtypes.as_dtype(dtype)\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n return array_ops.zeros(shape, dtype)\n\n def get_config(self):\n return {'dtype': self.dtype.name}", + "docstring": "Initializer that generates tensors initialized to 0. @compatibility(TF2) is compatible with eager execution and . To migrate to TF2, please use instead. The argument in does not exist in . However, you can specify the in in both cases. #### Structural Mapping to TF2 Before: After: #### How to Map Arguments | TF1 Arg Name | TF2 Arg Name | Note | | :------------------- | :--------------- | :------------------------- | | | | In method | | | - | ( arg in TF1) Not supported | #### Before & After Usage Example Before: >>> initializer = tf.compat.v1.zeros_initializer(dtype=tf.float32) >>> tf.Variable(initializer(shape=[3])).numpy() array([0., 0., 0.], dtype=float32) >>> tf.Variable(initializer(shape=[3, 3])).numpy() array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=float32) >>> initializer = tf.compat.v1.zeros_initializer() >>> tf.Variable(initializer(shape=[3], dtype=tf.float32)).numpy() array([0., 0., 0.], dtype=float32) >>> tf.Variable(initializer(shape=[3, 3], dtype=tf.float32)).numpy() array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=float32) After: >>> initializer = tf.zeros_initializer() >>> tf.Variable(initializer(shape=[3], dtype=tf.float32)).numpy() array([0., 0., 0.], dtype=float32) >>> tf.Variable(initializer(shape=[3, 3], dtype=tf.float32)).numpy() array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=float32) @end_compatibility", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "ClassDef name:Zeros FunctionDef name:__init__ arg:self arg:dtype arguments arg arg Assign Call Call FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:partition_info arguments arg arg arg arg If Compare Assign Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "pbvv_seq", + "source_code": "def pbvv_seq(v, x):\n if not (isscalar(v) and isscalar(x)):\n raise ValueError('arguments must be scalars.')\n n = int(v)\n v0 = v - n\n if n <= 1:\n n1 = 1\n else:\n n1 = n\n v1 = n1 + v0\n dv, dp, pdf, pdd = _specfun.pbvv(v1, x)\n return (dv[:n1 + 1], dp[:n1 + 1])", + "docstring": "Parabolic cylinder functions Vv(x) and derivatives. Parameters ---------- v : float Order of the parabolic cylinder function x : float Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. dp : ndarray Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special Functions\", John Wiley and Sons, 1996, chapter 13.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:pbvv_seq arg:v arg:x arguments arg arg If BoolOp Call Call Raise Call Assign Call Assign If Compare Assign Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "split_result_tensors", + "source_code": "def split_result_tensors(result: torch.Tensor, inputs: list[torch.Tensor]) -> tuple[torch.Tensor, ...]:\n if isinstance(result, torch.fx.Proxy):\n splits = [0] * len(inputs)\n else:\n splits = [x.shape[0] for x in inputs]\n return torch.split(result, splits)", + "docstring": "A free function for use in the merge_matmul graph transformation below that splits the output from a merged matmul into the individual results for each input tensor. Arguments: result: The merged matmul result tensor. inputs: The list of inputs that were merged into one for the matmul. Returns: List of matmul results for each input tensor.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\merge_matmul.py", + "ast_data": "FunctionDef name:split_result_tensors arg:result arg:inputs arguments arg arg If Call Assign Call Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "print_results", + "source_code": "def print_results(self):\n results = self.average_results()\n results = sorted(results, key=lambda x: (x.nfail, x.mean_time))\n if not results:\n return\n print('')\n print('=========================================================')\n print(f'Optimizer benchmark: {self.function_name}')\n print(f'dimensions: {results[0].ndim}, extra kwargs: {str(self.minimizer_kwargs)}')\n print(f'averaged over {results[0].ntrials} starting configurations')\n print(' Optimizer nfail nfev njev nhev time')\n print('---------------------------------------------------------')\n for res in results:\n print(f'{res.name:11s} | {res.nfail:4d} | {res.mean_nfev:4d} | {res.mean_njev:4d} | {res.mean_nhev:4d} | {res.mean_time:.6g}')", + "docstring": "print the current list of results", + "type": "method", + "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py", + "ast_data": "FunctionDef name:print_results arg:self arguments arg Assign Call Assign Call arguments arg If Return return:no Call Call Call Call Call Call Call Call For Call" + }, + { + "library": "pytorch", + "name": "_pipelined_all_gather_and_consume", + "source_code": "def _pipelined_all_gather_and_consume(shard: torch.Tensor, shard_consumer: Callable[[torch.Tensor, int], None], ag_out: torch.Tensor, group_name: str, ag_out_needed: bool=True) -> None:\n\n def adapter(shard: list[torch.Tensor], rank: int) -> None:\n shard_consumer(shard[0], rank)\n _pipelined_multi_all_gather_and_consume([shard], adapter, [ag_out], group_name, ag_out_needed)", + "docstring": "Perform the following logic with micro-pipelined computation and communication: ag_out = all_gather_tensor(shard, gather_dim=0, group=group) shards = ag_out.chunk(group.size()) for src_rank, shard in enumerate(shards): shard_consumer(shard, src_rank)", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py", + "ast_data": "FunctionDef name:_pipelined_all_gather_and_consume arg:shard arg:shard_consumer arg:ag_out arg:group_name arg:ag_out_needed arguments arg arg arg arg arg FunctionDef name:adapter arg:shard arg:rank arguments arg arg Call Call" + }, + { + "library": "django", + "name": "change_list_object_tools_tag", + 
"source_code": "@register.tag(name='change_list_object_tools')\ndef change_list_object_tools_tag(parser, token):\n return InclusionAdminNode(parser, token, func=lambda context: context, template_name='change_list_object_tools.html')", + "docstring": "Display the row of change list object tools.", + "type": "function", + "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_list.py", + "ast_data": "FunctionDef name:change_list_object_tools_tag arg:parser arg:token arguments arg arg Return return:yes Call arguments arg Call" + }, + { + "library": "tensorflow", + "name": "merge_caches_on_tpu", + "source_code": "def merge_caches_on_tpu(self, local_tpu_cache_tensor):\n x = array_ops.broadcast_to(local_tpu_cache_tensor, shape=[self._tt_config.num_replicas] + local_tpu_cache_tensor.shape.as_list())\n if tensor_tracer_flags.TT_SINGLE_CORE_SUMMARIES.value:\n return x\n return tpu_ops.all_to_all(x, concat_dimension=0, split_dimension=0, split_count=self._tt_config.num_replicas, group_assignment=[list(range(self._tt_config.num_replicas))])", + "docstring": "Merges the given caches on tpu. Args: local_tpu_cache_tensor: A local tensor that needs to be merged by concanting data from other tpu cores. Returns: A merged tf.Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:merge_caches_on_tpu arg:self arg:local_tpu_cache_tensor arguments arg arg Assign Call Call If Return return:yes Return return:yes Call Call Call" + }, + { + "library": "kornia", + "name": "to_dlpack", + "source_code": "def to_dlpack(self) -> DLPack:\n return to_dlpack(self.data)", + "docstring": "Return a DLPack capsule from the image tensor.", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:to_dlpack arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "split", + "source_code": "def split(self, X, y=None, groups=None):\n return super().split(X, y, groups)", + "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. 
You can make the results identical by setting to an integer.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", + "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, values, row_splits):\n if not (isinstance(row_splits, (np.ndarray, np.generic)) and row_splits.dtype in (np.int64, np.int32) and (row_splits.ndim == 1)):\n raise TypeError('row_splits must be a 1D int32 or int64 numpy array')\n if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)):\n raise TypeError('values must be a numpy array or a RaggedTensorValue')\n if isinstance(values, RaggedTensorValue) and row_splits.dtype != values.row_splits.dtype:\n raise ValueError('row_splits and values.row_splits must have the same dtype')\n self._values = values\n self._row_splits = row_splits", + "docstring": "Creates a . Args: values: A numpy array of any type and shape; or a RaggedTensorValue. row_splits: A 1-D int32 or int64 numpy array.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:values arg:row_splits arguments arg arg arg If BoolOp Call Compare Compare Raise Call If Call Raise Call If BoolOp Call Compare Raise Call Assign Assign" + }, + { + "library": "scipy", + "name": "default_xp", + "source_code": "@contextmanager\ndef default_xp(xp: ModuleType) -> Generator[None, None, None]:\n token = _default_xp_ctxvar.set(xp)\n try:\n yield\n finally:\n _default_xp_ctxvar.reset(token)", + "docstring": "In all `xp` is the namespace for the desired array (the second parameter of the tests).", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_array_api.py", + "ast_data": "FunctionDef name:default_xp arg:xp arguments arg Assign Call Try Call" + }, + { + "library": "tensorflow", + "name": "convert_n_to_tensor", + "source_code": "def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None) -> list[Union[EagerTensor, SymbolicTensor]]:\n return internal_convert_n_to_tensor(values=values, dtype=dtype, name=name, preferred_dtype=preferred_dtype, as_ref=False)", + "docstring": "Converts to a list of objects. Args: values: A list of objects that can be consumed by . dtype: (Optional.) The required of the returned objects. name: (Optional.) A name prefix to used when a new is created, in which case element will be given the name . preferred_dtype: Optional element type for the returned tensors, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to is not possible, this argument has no effect. Returns: A list of and/or objects. Raises: TypeError: If no conversion function is registered for an element in . 
RuntimeError: If a registered conversion function returns an invalid value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:convert_n_to_tensor arg:values arg:dtype arg:name arg:preferred_dtype arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "date_extract_sql", + "source_code": "def date_extract_sql(self, lookup_type, sql, params):\n raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')", + "docstring": "Given a lookup_type of 'year', 'month', or 'day', return the SQL that extracts a value from the given date field field_name.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:date_extract_sql arg:self arg:lookup_type arg:sql arg:params arguments arg arg arg arg Raise Call" + }, + { + "library": "kornia", + "name": "RandomVerticalFlip", + "source_code": "class RandomVerticalFlip(GeometricAugmentationBase2D):\n\n def compute_transformation(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any]) -> Tensor:\n h: int = int(params['forward_input_shape'][-2])\n flip_mat: Tensor = tensor([[1, 0, 0], [0, -1, h - 1], [0, 0, 1]], device=input.device, dtype=input.dtype)\n return flip_mat.expand(input.shape[0], 3, 3)\n\n def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n return vflip(input)\n\n def inverse_transform(self, input: Tensor, flags: Dict[str, Any], transform: Optional[Tensor]=None, size: Optional[Tuple[int, int]]=None) -> Tensor:\n if not isinstance(transform, Tensor):\n raise TypeError(f'Expected the `transform` be a Tensor. Got {type(transform)}.')\n return self.apply_transform(input, params=self._params, transform=as_tensor(transform, device=input.device, dtype=input.dtype), flags=flags)", + "docstring": "Apply a random vertical flip to a tensor image or a batch of tensor images with a given probability. .. image:: _static/img/RandomVerticalFlip.png Args: p: probability of the image being flipped. same_on_batch: apply the same transformation across the batch. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). Shape: - Input: :math: or :math:, Optional: :math: - Output: :math: .. note:: This function internally uses :func:. Examples: >>> import torch >>> input = torch.tensor([[[[0., 0., 0.], ... [0., 0., 0.], ... 
[0., 1., 1.]]]]) >>> seq = RandomVerticalFlip(p=1.0) >>> seq(input), seq.transform_matrix (tensor([[[[0., 1., 1.], [0., 0., 0.], [0., 0., 0.]]]]), tensor([[[ 1., 0., 0.], [ 0., -1., 2.], [ 0., 0., 1.]]])) >>> seq.inverse(seq(input)).equal(input) True To apply the exact augmenation again, you may take the advantage of the previous parameter state: >>> input = torch.randn(1, 3, 32, 32) >>> seq = RandomVerticalFlip(p=1.0) >>> (seq(input) == seq(input, params=seq._params)).all() tensor(True)", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\vertical_flip.py", + "ast_data": "ClassDef name:RandomVerticalFlip FunctionDef name:compute_transformation arg:self arg:input arg:params arg:flags arguments arg arg arg arg Call Call Return return:yes Call FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call FunctionDef name:inverse_transform arg:self arg:input arg:flags arg:transform arg:size arguments arg arg arg arg arg If Call Raise Call Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "staged_predict", + "source_code": "def staged_predict(self, X):\n for raw_predictions in self._staged_raw_predict(X):\n yield self._loss.link.inverse(raw_predictions.ravel())", + "docstring": "Predict regression target for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Yields ------ y : generator of ndarray of shape (n_samples,) The predicted values of the input samples, for each iteration.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:staged_predict arg:self arg:X arguments arg arg For Call Call Call" + }, + { + "library": "pytorch", + "name": "fetch_args_kwargs_from_env", + "source_code": "@compatibility(is_backward_compatible=True)\ndef fetch_args_kwargs_from_env(self, n: Node) -> tuple[tuple, dict]:\n args = self.map_nodes_to_values(n.args, n)\n assert isinstance(args, tuple)\n kwargs = self.map_nodes_to_values(n.kwargs, n)\n assert isinstance(kwargs, dict)\n return (args, kwargs)", + "docstring": "Fetch the concrete values of ``.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\interpreter.py", + "ast_data": "FunctionDef name:fetch_args_kwargs_from_env arg:self arg:n arguments arg arg Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "check_readonly_memmap_input", + "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_readonly_memmap_input(name, estimator_orig):\n X, y = make_blobs(random_state=0, n_samples=21)\n X = _enforce_estimator_tags_X(estimator_orig, X)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n X, y = create_memmap_backed_data([X, y])\n set_random_state(estimator)\n assert estimator.fit(X, y) is estimator", + "docstring": "Check that the estimator can handle readonly memmap backed data. 
This is particularly needed to support joblib parallelisation.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py", + "ast_data": "FunctionDef name:check_readonly_memmap_input arg:name arg:estimator_orig arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Compare Call Call" + }, + { + "library": "pandas", + "name": "_is_valid_endpoint", + "source_code": "def _is_valid_endpoint(endpoint) -> bool:\n return any([is_number(endpoint), isinstance(endpoint, Timestamp), isinstance(endpoint, Timedelta), endpoint is None])", + "docstring": "Helper for interval_range to check if start/end are valid types.", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexes\\interval.py", + "ast_data": "FunctionDef name:_is_valid_endpoint arg:endpoint arguments arg Return return:yes Call Call Call Call Compare" + }, + { + "library": "pytorch", + "name": "ModelInfo", + "source_code": "@dataclasses.dataclass\nclass ModelInfo:\n parameter_count: defaultdict[torch.dtype, int] = dataclasses.field(default_factory=lambda: defaultdict(int))\n buffer_count: defaultdict[torch.dtype, int] = dataclasses.field(default_factory=lambda: defaultdict(int))\n fx_node_count: int = 0\n fx_node_op_count: defaultdict[str, int] = dataclasses.field(default_factory=lambda: defaultdict(int))\n fx_node_target_count: defaultdict[str, int] = dataclasses.field(default_factory=lambda: defaultdict(int))\n dispatch_failures: list[tuple[torch.fx.Node, str]] = dataclasses.field(default_factory=list)\n inputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(default_factory=dict)\n outputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(default_factory=dict)", + "docstring": "Information about the model.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_analysis.py", + "ast_data": "ClassDef name:ModelInfo Call arguments Call Call arguments Call Call arguments Call Call arguments Call Call Call Call" + }, + { + "library": "django", + "name": "precision", + "source_code": "@property\ndef precision(self):\n return capi.get_field_precision(self.ptr)", + "docstring": "Return the precision of this Field.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\field.py", + "ast_data": "FunctionDef name:precision arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_copy_weights_to_distributed_model", + "source_code": "def _copy_weights_to_distributed_model(original_model, mode):\n strategy = original_model._distribution_strategy\n distributed_model = get_distributed_model(original_model, mode)\n if strategy:\n orig_model_weights = original_model.get_weights()\n first_model = strategy.unwrap(distributed_model)[0]\n set_weights(strategy, first_model, orig_model_weights)", + "docstring": "Copies weights from original model to distributed models.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", + "ast_data": "FunctionDef name:_copy_weights_to_distributed_model arg:original_model arg:mode arguments arg arg Assign Assign Call If Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_get_concrete_function_garbage_collected", + "source_code": "def _get_concrete_function_garbage_collected(self, *args, **kwargs):\n with self._lock:\n if self._variable_creation_config is None:\n initializers = []\n self._initialize(args, kwargs, 
add_initializers_to=initializers)\n self._initialize_uninitialized_variables(initializers)\n if self._created_variables:\n return tracing_compilation.trace_function(args, kwargs, dataclasses.replace(self._no_variable_creation_config, bind_graph_to_function=True))\n elif self._variable_creation_config is not None:\n concrete = tracing_compilation.trace_function(args, kwargs, dataclasses.replace(self._variable_creation_config, bind_graph_to_function=True))\n if self._created_variables:\n raise ValueError('Creating variables on a non-first call to a function decorated with tf.function.')\n return concrete", + "docstring": "Returns a specialized to inputs and execution context. Unlike , the graph will be deleted when the returned function is deleted. It's useful to avoid creating a reference cycle when you know for sure that the graph will be no longer used without the returned function. Args: *args: inputs to specialize on. **kwargs: inputs to specialize on. Returns: A TensorFlow function which takes exactly one per argument. Raises: ValueError: if this object has not yet been called on concrete values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", + "ast_data": "FunctionDef name:_get_concrete_function_garbage_collected arg:self arguments arg arg arg With If Compare Assign Call Call If Return return:yes Call Call If Compare Assign Call Call If Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "as_default", + "source_code": "def as_default(self):\n return stack.default_session(self)", + "docstring": "Returns a context manager that makes this object the default session. Use with the keyword to specify that calls to or should be executed in this session. To get the current default session, use . *N.B.* The context manager *does not* close the session when you exit the context, and you must close the session explicitly. Alternatively, you can use to create a session that is automatically closed on exiting the context, including when an uncaught exception is raised. *N.B.* The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a in that thread's function. *N.B.* Entering a block does not affect the current default graph. If you are using multiple graphs, and is different from the value of , you must explicitly enter a block to make the default graph. 
Returns: A context manager using this session as the default session.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:as_default arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_pad", + "source_code": "def _pad(batch):\n padded_dict_batch = {}\n if isinstance(batch, dict):\n for key, value in batch.items():\n padded_dict_batch[key] = _pad(value)\n return padded_dict_batch\n rank = len(batch.shape)\n assert rank > 0\n missing_count = self.padded_batch_size - self.get_real_batch_size(batch)\n padding = backend.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))\n return array_ops.pad(batch, padding, 'constant')", + "docstring": "Helper function to pad nested data within each batch elements.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\partial_batch_padding_handler.py", + "ast_data": "FunctionDef name:_pad arg:batch arguments arg Assign If Call For Call Assign Call Return return:yes Assign Call Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "SixHumpCamel", + "source_code": "class SixHumpCamel(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n self.custom_bounds = [(-2, 2), (-1.5, 1.5)]\n self.global_optimum = [(0.08984201368301331, -0.7126564032704135), (-0.08984201368301331, 0.7126564032704135)]\n self.fglob = -1.031628\n\n def fun(self, x, *args):\n self.nfev += 1\n return (4 - 2.1 * x[0] ** 2 + x[0] ** 4 / 3) * x[0] ** 2 + x[0] * x[1] + (4 * x[1] ** 2 - 4) * x[1] ** 2", + "docstring": "Six Hump Camel objective function. This class defines the Six Hump Camel [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{SixHumpCamel}}(x) = 4x_1^2+x_1x_2-4x_2^2-2.1x_1^4+ 4x_2^4+\\frac{1}{3}x_1^6 with :math: for :math:. *Global optimum*: :math: for :math: or :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:SixHumpCamel FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "make_seeds", + "source_code": "def make_seeds(self, count=1):\n alg = self.algorithm\n if alg in (a.value for a in random_ops_util.Algorithm):\n keys = self._make_int64_keys(shape=[count])\n zeros = array_ops.zeros_like(keys)\n return array_ops_stack.stack([keys, zeros])\n else:\n raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))", + "docstring": "Generates seeds for stateless random ops. For example: Args: count: the number of seed pairs (note that stateless random ops need a pair of seeds to invoke). 
Returns: A tensor of shape [2, count] and dtype int64.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:make_seeds arg:self arg:count arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes Call Raise Call Call" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, bus, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):\n SimplePlugin.__init__(self, bus)\n self.stdin = stdin\n self.stdout = stdout\n self.stderr = stderr\n self.finalized = False", + "docstring": "Initialize the daemonizer plugin.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\plugins.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:bus arg:stdin arg:stdout arg:stderr arguments arg arg arg arg arg Call Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "refine_names", + "source_code": "def refine_names(self, *names):\n if has_torch_function_unary(self):\n return handle_torch_function(Tensor.refine_names, (self,), self, *names)\n names = resolve_ellipsis(names, self.names, 'refine_names')\n return super().refine_names(names)", + "docstring": "Refines the dimension names of :attr: according to :attr:. Refining is a special case of renaming that \"lifts\" unnamed dimensions. A `namesnames`). Args: names (iterable of str): The desired names of the output tensor. May contain up to one Ellipsis. Examples:: >>> imgs = torch.randn(32, 3, 128, 128) >>> named_imgs = imgs.refine_names('N', 'C', 'H', 'W') >>> named_imgs.names ('N', 'C', 'H', 'W') >>> tensor = torch.randn(2, 3, 5, 7, 11) >>> tensor = tensor.refine_names('A', ..., 'B', 'C') >>> tensor.names ('A', None, None, 'B', 'C') .. warning:: The named tensor API is experimental and subject to change.", + "type": "method", + "file_path": "pytorch\\torch\\_tensor.py", + "ast_data": "FunctionDef name:refine_names arg:self arguments arg arg If Call Return return:yes Call Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_set_names", + "source_code": "def _set_names(self, names, *, level=None) -> None:\n if names is not None and (not is_list_like(names)):\n raise ValueError('Names should be list-like for a MultiIndex')\n names = list(names)\n if level is not None and len(names) != len(level):\n raise ValueError('Length of names must match length of level.')\n if level is None and len(names) != self.nlevels:\n raise ValueError('Length of names must match number of levels in MultiIndex.')\n if level is None:\n level = range(self.nlevels)\n else:\n level = (self._get_level_number(lev) for lev in level)\n for lev, name in zip(level, names):\n if name is not None:\n if not is_hashable(name):\n raise TypeError(f'{type(self).__name__}.name must be a hashable type')\n self._names[lev] = name\n self._reset_cache('levels')", + "docstring": "Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. Notes ----- sets names on levels. WARNING: mutates! 
Note that you generally want to set this *after* changing levels, so that it only acts on copies", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:_set_names arg:self arg:names arguments arg arg arg If BoolOp Compare Call Raise Call Assign Call If BoolOp Compare Compare Call Call Raise Call If BoolOp Compare Compare Call Raise Call If Compare Assign Call Assign Call For Call If Compare If Call Raise Call Call Assign Call" + }, + { + "library": "scrapy", + "name": "iter_all", + "source_code": "def iter_all(class_name: str) -> Iterable[Any]:\n for cls, wdict in live_refs.items():\n if cls.__name__ == class_name:\n return wdict.keys()\n return []", + "docstring": "Iterate over all objects of the same class by its class name", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\trackref.py", + "ast_data": "FunctionDef name:iter_all arg:class_name arguments arg For Call If Compare Return return:yes Call Return return:no" + }, + { + "library": "django", + "name": "has_add_permission", + "source_code": "def has_add_permission(self, request):\n opts = self.opts\n codename = get_permission_codename('add', opts)\n return request.user.has_perm('%s.%s' % (opts.app_label, codename))", + "docstring": "Return True if the given request has permission to add an object. Can be overridden by the user in subclasses.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:has_add_permission arg:self arg:request arguments arg arg Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_BesselY1Grad", + "source_code": "@ops.RegisterGradient('BesselY1')\ndef _BesselY1Grad(op: ops.Operation, grad):\n x = op.inputs[0]\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n partial_x = special_math_ops.bessel_y0(x) - math_ops.div(y, x)\n return grad * partial_x", + "docstring": "Compute gradient of bessel_y1(x) with respect to its argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_BesselY1Grad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_quant_params", + "source_code": "def _get_quant_params(tensor_detail: Mapping[str, Any]) -> Optional[Tuple[float, int]]:\n quant_params = tensor_detail['quantization_parameters']\n if not quant_params:\n return None\n if quant_params['scales'] and quant_params['zero_points']:\n return (quant_params['scales'][0], quant_params['zero_points'][0])\n return None", + "docstring": "Returns first scale and zero point from tensor detail, if present.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py", + "ast_data": "FunctionDef name:_get_quant_params arg:tensor_detail arguments arg Assign If Return return:no If BoolOp Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "_FloorDivGrad", + "source_code": "@ops.RegisterGradient('FloorDiv')\ndef _FloorDivGrad(_, unused_grad):\n return (None, None)", + "docstring": "The gradient for the FloorDiv operator.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_FloorDivGrad arg:_ arg:unused_grad arguments arg arg Return return:no Call" + }, + { + "library": "cherrypy", + "name": "average_uriset_time", + "source_code": "def 
average_uriset_time(s):\n return s['Count'] and s['Sum'] / s['Count'] or 0", + "docstring": "Compute average request processing time within a URI set.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py", + "ast_data": "FunctionDef name:average_uriset_time arg:s arguments arg Return return:yes BoolOp BoolOp" + }, + { + "library": "pytorch", + "name": "set_object_type", + "source_code": "def set_object_type(self, object_type: Union[Callable, str], qconfig: QConfigAny) -> QConfigMapping:\n self.object_type_qconfigs[object_type] = qconfig\n return self", + "docstring": "Set the QConfig for a given module type, function, or method name. If the QConfig for an existing object type was already set, the new QConfig will override the old one.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py", + "ast_data": "FunctionDef name:set_object_type arg:self arg:object_type arg:qconfig arguments arg arg arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_combine_handle_data", + "source_code": "def _combine_handle_data(handle, initial_value):\n assert handle.dtype == dtypes.resource\n variable_handle_data = get_eager_safe_handle_data(handle)\n if initial_value.dtype != dtypes.variant:\n return variable_handle_data\n extra_handle_data = get_eager_safe_handle_data(initial_value)\n if extra_handle_data is not None and extra_handle_data.is_set:\n if variable_handle_data is None or not variable_handle_data.is_set or len(variable_handle_data.shape_and_type) != 1:\n raise RuntimeError(f\"Expected VarHandleOp to return a length==1 shape_and_type, but saw: '{variable_handle_data}'\")\n variable_handle_data.shape_and_type.extend(extra_handle_data.shape_and_type)\n return variable_handle_data", + "docstring": "Concats HandleData from tensors and . Args: handle: A of dtype . initial_value: A . Returns: A . If has dtype , the contains the concatenation of the shape_and_type from both and . Raises: RuntimeError: If handle, which was returned by VarHandleOp, either has no handle data, or its len(handle_data.shape_and_type) != 1.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:_combine_handle_data arg:handle arg:initial_value arguments arg arg Compare Assign Call If Compare Return return:yes Assign Call If BoolOp Compare If BoolOp Compare Compare Call Raise Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "edit_margin", + "source_code": "def edit_margin(self, todo, size, cell):\n self.solver.suggestValue(self.margins[todo][cell], size)\n self.margin_vals[todo][cell] = size", + "docstring": "Change the size of the margin for one cell. Parameters ---------- todo : string (one of 'left', 'right', 'bottom', 'top') margin to alter. size : float Size of the margin. If it is larger than the existing minimum it updates the margin size. Fraction of figure size. 
cell : int Cell column or row to edit.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py", + "ast_data": "FunctionDef name:edit_margin arg:self arg:todo arg:size arg:cell arguments arg arg arg arg Call Assign" + }, + { + "library": "pytorch", + "name": "hardsigmoid", + "source_code": "def hardsigmoid(input: Tensor, inplace: bool=False) -> Tensor:\n if not input.is_quantized:\n raise ValueError(\"Input to 'quantized.hardsigmoid' must be quantized!\")\n if inplace:\n return torch._C._nn.hardsigmoid_(input)\n return torch._C._nn.hardsigmoid(input)", + "docstring": "This is the quantized version of :func:.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", + "ast_data": "FunctionDef name:hardsigmoid arg:input arg:inplace arguments arg arg If Raise Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "run_all_keras_modes", + "source_code": "def run_all_keras_modes(test_or_class=None, config=None, always_skip_v1=False, always_skip_eager=False, **kwargs):\n if kwargs:\n raise ValueError('Unrecognized keyword args: {}'.format(kwargs))\n params = [('_v2_function', 'v2_function')]\n if not always_skip_eager:\n params.append(('_v2_eager', 'v2_eager'))\n if not (always_skip_v1 or tf2.enabled()):\n params.append(('_v1_session', 'v1_session'))\n\n def single_method_decorator(f):\n\n @parameterized.named_parameters(*params)\n @functools.wraps(f)\n def decorated(self, run_mode, *args, **kwargs):\n if run_mode == 'v1_session':\n _v1_session_test(f, self, config, *args, **kwargs)\n elif run_mode == 'v2_eager':\n _v2_eager_test(f, self, *args, **kwargs)\n elif run_mode == 'v2_function':\n _v2_function_test(f, self, *args, **kwargs)\n else:\n return ValueError('Unknown run mode %s' % run_mode)\n return decorated\n return _test_or_class_decorator(test_or_class, single_method_decorator)", + "docstring": "Execute the decorated test with all keras execution modes. This decorator is intended to be applied either to individual test methods in a class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once executing in legacy graph mode, once running eagerly and with returning True, and once running eagerly with returning False. If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and the test will only run twice. Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. For example, consider the following unittest: This test will try compiling & fitting the small functional mlp using all three Keras execution modes. Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. config: An optional config_pb2.ConfigProto to use to configure the session when executing graphs. always_skip_v1: If True, does not try running the legacy graph mode even when Tensorflow v2 behavior is not enabled. always_skip_eager: If True, does not execute the decorated test with eager execution modes. **kwargs: Additional kwargs for configuring tests for in-progress Keras behaviors/ refactorings that we haven't fully rolled out yet Returns: Returns a decorator that will run the decorated test method multiple times. 
Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py", + "ast_data": "FunctionDef name:run_all_keras_modes arg:test_or_class arg:config arg:always_skip_v1 arg:always_skip_eager arguments arg arg arg arg arg If Raise Call Call Assign If Call If BoolOp Call Call FunctionDef name:single_method_decorator arg:f arguments arg FunctionDef name:decorated arg:self arg:run_mode arguments arg arg arg arg If Compare Call If Compare Call If Compare Call Return return:yes Call Call Call Return return:yes Return return:yes Call" + }, + { + "library": "authlib", + "name": "encrypt", + "source_code": "def encrypt(self, msg, aad, iv, key):\n self.check_iv(iv)\n chacha = Cryptodome_ChaCha20_Poly1305.new(key=key, nonce=iv)\n chacha.update(aad)\n ciphertext, tag = chacha.encrypt_and_digest(msg)\n return (ciphertext, tag)", + "docstring": "Content Encryption with AEAD_XCHACHA20_POLY1305. :param msg: text to be encrypt in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param key: encrypted key in bytes :return: (ciphertext, tag)", + "type": "method", + "file_path": "authlib\\authlib\\jose\\drafts\\_jwe_enc_cryptodome.py", + "ast_data": "FunctionDef name:encrypt arg:self arg:msg arg:aad arg:iv arg:key arguments arg arg arg arg arg Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_label_position", + "source_code": "def set_label_position(self, position):\n self.label.set_rotation_mode('anchor')\n self.label.set_verticalalignment(_api.check_getitem({'left': 'bottom', 'right': 'top'}, position=position))\n self.label_position = position\n self.stale = True", + "docstring": "Set the label position (left or right) Parameters ---------- position : {'left', 'right'}", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:set_label_position arg:self arg:position arguments arg arg Call Call Call Assign Assign" + }, + { + "library": "django", + "name": "reset", + "source_code": "def reset(self):\n pass", + "docstring": "Reset any state maintained by the loader instance (e.g. 
cached templates or cached loader modules).", + "type": "method", + "file_path": "django\\django\\template\\loaders\\base.py", + "ast_data": "FunctionDef name:reset arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "_IndexedSlicesToTensorNoWarning", + "source_code": "def _IndexedSlicesToTensorNoWarning(indexed_slices):\n if not isinstance(indexed_slices, indexed_slices_lib.IndexedSlices):\n return indexed_slices\n if indexed_slices.dense_shape is None:\n raise ValueError('Tensor conversion requested for IndexedSlices without dense_shape: %s' % str(indexed_slices))\n return math_ops.unsorted_segment_sum(indexed_slices.values, indexed_slices.indices, indexed_slices.dense_shape[0])", + "docstring": "Converts an IndexedSlices to a Tensor without sparse->dense warnings.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py", + "ast_data": "FunctionDef name:_IndexedSlicesToTensorNoWarning arg:indexed_slices arguments arg If Call Return return:yes If Compare Raise Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "to_numpy", + "source_code": "def to_numpy(self, x: Any) -> 'np.array':\n if isinstance(x, (Tensor,)):\n return x.cpu().detach().numpy()\n if isinstance(x, (np.ndarray,)):\n return x\n if isinstance(x, (Image.Image,)):\n return np.array(x)\n raise TypeError('Input type not supported')", + "docstring": "Convert input to numpy array. Args: x: The input to convert. Returns: np.array: The converted numpy array.", + "type": "method", + "file_path": "kornia\\kornia\\core\\mixin\\image_module.py", + "ast_data": "FunctionDef name:to_numpy arg:self arg:x arguments arg arg If Call Return return:yes Call Call Call If Call Return return:yes If Call Return return:yes Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_load_distributed_snapshot", + "source_code": "def _load_distributed_snapshot(path: str, metadata: snapshot_pb2.DistributedSnapshotMetadata, reader_func: Callable[[dataset_ops.Dataset], dataset_ops.Dataset]) -> dataset_ops.Dataset:\n dataset = _ListSnapshotChunksDataset(path)\n dataset = dataset.map(lambda chunk_file: _SnapshotChunkDataset(chunk_file, element_spec=_parse_element_spec(metadata.element_spec), compression=metadata.compression))\n return reader_func(dataset)", + "docstring": "Loads a distributed snapshot.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\load_op.py", + "ast_data": "FunctionDef name:_load_distributed_snapshot arg:path arg:metadata arg:reader_func arguments arg arg arg Assign Call Assign Call arguments arg Call Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "__iter__", + "source_code": "def __iter__(self):\n return self", + "docstring": "Return iterator.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\__init__.py", + "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "cumprod", + "source_code": "@final\n@Substitution(name='groupby')\n@Substitution(see_also=_common_see_also)\ndef cumprod(self, numeric_only: bool=False, *args, **kwargs) -> NDFrameT:\n nv.validate_groupby_func('cumprod', args, kwargs, ['skipna'])\n return self._cython_transform('cumprod', numeric_only, **kwargs)", + "docstring": "Cumulative product for each group. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. *args : tuple Positional arguments to be passed to . 
**kwargs : dict Additional/specific keyword arguments to be passed to the function, such as and . Returns ------- Series or DataFrame Cumulative product for each group. Same object type as the caller. %(see_also)s Examples -------- For SeriesGroupBy: >>> lst = [\"a\", \"a\", \"b\"] >>> ser = pd.Series([6, 2, 0], index=lst) >>> ser a 6 a 2 b 0 dtype: int64 >>> ser.groupby(level=0).cumprod() a 6 a 12 b 0 dtype: int64 For DataFrameGroupBy: >>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]] >>> df = pd.DataFrame( ... data, columns=[\"a\", \"b\", \"c\"], index=[\"cow\", \"horse\", \"bull\"] ... ) >>> df a b c cow 1 8 2 horse 1 2 5 bull 2 6 9 >>> df.groupby(\"a\").groups {1: ['cow', 'horse'], 2: ['bull']} >>> df.groupby(\"a\").cumprod() b c cow 8 2 horse 16 10 bull 6 9", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:cumprod arg:self arg:numeric_only arguments arg arg arg arg Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "random_normal_variable", + "source_code": "@doc_controls.do_not_generate_docs\ndef random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None):\n if dtype is None:\n dtype = floatx()\n tf_dtype = dtypes_module.as_dtype(dtype)\n if seed is None:\n seed = np.random.randint(1000000000.0)\n value = init_ops.random_normal_initializer(mean, scale, dtype=tf_dtype, seed=seed)(shape)\n return variable(value, dtype=dtype, name=name)", + "docstring": "Instantiates a variable with values drawn from a normal distribution. Args: shape: Tuple of integers, shape of returned Keras variable. mean: Float, mean of the normal distribution. scale: Float, standard deviation of the normal distribution. dtype: String, dtype of returned Keras variable. name: String, name of returned Keras variable. seed: Integer, random seed. Returns: A Keras variable, filled with drawn samples. Example: >>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3), ... mean=0.0, scale=1.0) >>> kvar", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:random_normal_variable arg:shape arg:mean arg:scale arg:dtype arg:name arg:seed arguments arg arg arg arg arg arg If Compare Assign Call Assign Call If Compare Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_bbox_to_anchor", + "source_code": "def get_bbox_to_anchor(self):\n if self._bbox_to_anchor is None:\n return self.parent.bbox\n else:\n return self._bbox_to_anchor", + "docstring": "Return the bbox that the legend will be anchored to.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\legend.py", + "ast_data": "FunctionDef name:get_bbox_to_anchor arg:self arguments arg If Compare Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "pipe", + "source_code": "def pipe(self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T:\n return com.pipe(self, func, *args, **kwargs)", + "docstring": "Apply `funcDataFrame.pipeStyler.applyStyler.set_properties` context to define the level. >>> def highlight_last_level(styler): ... return styler.apply_index( ... lambda v: \"background-color: pink; color: yellow\", ... axis=\"columns\", ... level=styler.columns.nlevels - 1, ... ) # doctest: +SKIP >>> df.columns = pd.MultiIndex.from_product([[\"A\", \"B\"], [\"X\", \"Y\"]]) >>> df.style.pipe(highlight_last_level) # doctest: +SKIP .. 
figure:: ../../_static/style/df_pipe_applymap.png Additionally suppose we want to highlight a column header if there is any missing data in that column. In this case we need the data object itself to determine the effect on the column headers. >>> def highlight_header_missing(styler, level): ... def dynamic_highlight(s): ... return np.where( ... styler.data.isna().any(), \"background-color: red;\", \"\" ... ) ... ... return styler.apply_index(dynamic_highlight, axis=1, level=level) >>> df.style.pipe(highlight_header_missing, level=1) # doctest: +SKIP .. figure:: ../../_static/style/df_pipe_applydata.png", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style.py", + "ast_data": "FunctionDef name:pipe arg:self arg:func arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "zeros", + "source_code": "@property\ndef zeros(self):\n return self._zeros", + "docstring": "Zeros of the system.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:zeros arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "CppOptions", + "source_code": "class CppOptions(BuildOptionsBase):\n\n def __init__(self, compile_only: bool=False, warning_all: bool=True, extra_flags: Sequence[str]=(), use_relative_path: bool=False, compiler: str='', min_optimize: bool=False, precompiling: bool=False, preprocessing: bool=False) -> None:\n super().__init__(compile_only=compile_only, use_relative_path=use_relative_path, precompiling=precompiling, preprocessing=preprocessing)\n self._compiler = compiler if compiler else get_cpp_compiler()\n definitions, include_dirs, cflags, ldflags, libraries_dirs, libraries, passthrough_args = get_cpp_options(cpp_compiler=self._compiler, do_link=not (compile_only or precompiling or preprocessing), extra_flags=extra_flags, warning_all=warning_all, min_optimize=min_optimize)\n _append_list(self._definitions, definitions)\n _append_list(self._include_dirs, include_dirs)\n _append_list(self._cflags, cflags)\n _append_list(self._ldflags, ldflags)\n _append_list(self._libraries_dirs, libraries_dirs)\n _append_list(self._libraries, libraries)\n _append_list(self._passthrough_args, passthrough_args)\n self._finalize_options()", + "docstring": "This class is inherited from BuildOptionsBase, and as cxx build options. This option need contains basic cxx build option, which contains: 1. OS related args. 2. Toolchains related args. 3. Cxx standard related args. Note: 1. This Options is good for assist modules build, such as x86_isa_help.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\cpp_builder.py", + "ast_data": "ClassDef name:CppOptions FunctionDef name:__init__ arg:self arg:compile_only arg:warning_all arg:extra_flags arg:use_relative_path arg:compiler arg:min_optimize arg:precompiling arg:preprocessing arguments arg arg arg arg arg arg arg arg arg Call Call Assign Call Assign Call BoolOp Call Call Call Call Call Call Call Call" + }, + { + "library": "pandas", + "name": "axisinfo", + "source_code": "@staticmethod\ndef axisinfo(unit: tzinfo | None, axis) -> munits.AxisInfo:\n tz = unit\n majloc = PandasAutoDateLocator(tz=tz)\n majfmt = PandasAutoDateFormatter(majloc, tz=tz)\n datemin = pydt.date(2000, 1, 1)\n datemax = pydt.date(2010, 1, 1)\n return munits.AxisInfo(majloc=majloc, majfmt=majfmt, label='', default_limits=(datemin, datemax))", + "docstring": "Return the :class: for *unit*. *unit* is a tzinfo instance or None. 
The *axis* argument is required but not used.", + "type": "method", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py", + "ast_data": "FunctionDef name:axisinfo arg:unit arg:axis arguments arg arg Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_args_to_tuple", + "source_code": "def _args_to_tuple(self, node):\n builder = _ArgTemplateBuilder()\n for a in node.args:\n if isinstance(a, gast.Starred):\n builder.add_stararg(a.value)\n else:\n builder.add_arg(a)\n builder.finalize()\n return builder.to_ast()", + "docstring": "Ties together all positional and *arg arguments in a single tuple.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\call_trees.py", + "ast_data": "FunctionDef name:_args_to_tuple arg:self arg:node arguments arg arg Assign Call For If Call Call Call Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "remove_internal", + "source_code": "def remove_internal(self, sprite):\n self._spritelist.remove(sprite)\n old_rect = self.spritedict[sprite]\n if old_rect is not self._init_rect:\n self.lostsprites.append(old_rect)\n if hasattr(sprite, 'rect'):\n self.lostsprites.append(sprite.rect)\n del self.spritedict[sprite]\n del self._spritelayers[sprite]", + "docstring": "Do not use this method directly. The group uses it to add a sprite.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:remove_internal arg:self arg:sprite arguments arg arg Call Assign If Compare Call If Call Call" + }, + { + "library": "tensorflow", + "name": "SquaredHinge", + "source_code": "class SquaredHinge(MeanMetricWrapper):\n\n def __init__(self, name='squared_hinge', dtype=None):\n super(SquaredHinge, self).__init__(squared_hinge, name, dtype=dtype)", + "docstring": "Computes the squared hinge metric between and . values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.SquaredHinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 1.86 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 1.46 Usage with API:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "ClassDef name:SquaredHinge FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call" + }, + { + "library": "matplotlib", + "name": "start_rasterizing", + "source_code": "def start_rasterizing(self):\n self.figure.dpi = self.dpi\n self._raster_renderer = self._raster_renderer_class(self._width * self.dpi, self._height * self.dpi, self.dpi)\n self._renderer = self._raster_renderer\n if self._bbox_inches_restore:\n r = process_figure_for_rasterizing(self.figure, self._bbox_inches_restore, self._raster_renderer)\n self._bbox_inches_restore = r", + "docstring": "Enter \"raster\" mode. 
All subsequent drawing commands (until is called) will be drawn with the raster backend.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_mixed.py", + "ast_data": "FunctionDef name:start_rasterizing arg:self arguments arg Assign Assign Call Assign If Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "SessionRunArgs", + "source_code": "@tf_export(v1=['train.SessionRunArgs'])\nclass SessionRunArgs(collections.namedtuple('SessionRunArgs', ['fetches', 'feed_dict', 'options'])):\n\n def __new__(cls, fetches, feed_dict=None, options=None):\n return super(SessionRunArgs, cls).__new__(cls, fetches, feed_dict, options)", + "docstring": "Represents arguments to be added to a call. Args: fetches: Exactly like the 'fetches' argument to Session.Run(). Can be a single tensor or op, a list of 'fetches' or a dictionary of fetches. For example: fetches = global_step_tensor fetches = [train_op, summary_op, global_step_tensor] fetches = {'step': global_step_tensor, 'summ': summary_op} Note that this can recurse as expected: fetches = {'step': global_step_tensor, 'ops': [train_op, check_nan_op]} feed_dict: Exactly like the argument to options: Exactly like the argument to , i.e., a config_pb2.RunOptions proto.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py", + "ast_data": "ClassDef name:SessionRunArgs Call FunctionDef name:__new__ arg:cls arg:fetches arg:feed_dict arg:options arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "track_node_mutations", + "source_code": "def track_node_mutations(self, node: Node, flat_args_kwargs: list[Any], id_to_initial_version: dict[int, int]) -> None:\n mutated_arg_positions = OrderedSet[int]()\n for i, arg in enumerate(flat_args_kwargs):\n val_id = id(arg)\n if val_id in id_to_initial_version and id_to_initial_version[val_id] != arg._version:\n mutated_arg_positions.add(i)\n if mutated_arg_positions:\n self.node_to_mutated_arg_positions[node] = mutated_arg_positions", + "docstring": "This function tracks which argument positions are mutated by the given node. 
Subgraph HOP does not support input mutations today so we will skip regions which have inputs that are mutated.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\graph_region_tracker.py", + "ast_data": "FunctionDef name:track_node_mutations arg:self arg:node arg:flat_args_kwargs arg:id_to_initial_version arguments arg arg arg arg Assign Call For Call Assign Call If BoolOp Compare Compare Call If Assign" + }, + { + "library": "pandas", + "name": "combine", + "source_code": "def combine(self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable | None=None) -> Series:\n if fill_value is None:\n fill_value = na_value_for_dtype(self.dtype, compat=False)\n if isinstance(other, Series):\n new_index = self.index.union(other.index)\n new_name = ops.get_op_result_name(self, other)\n new_values = np.empty(len(new_index), dtype=object)\n with np.errstate(all='ignore'):\n for i, idx in enumerate(new_index):\n lv = self.get(idx, fill_value)\n rv = other.get(idx, fill_value)\n new_values[i] = func(lv, rv)\n else:\n new_index = self.index\n new_values = np.empty(len(new_index), dtype=object)\n with np.errstate(all='ignore'):\n new_values[:] = [func(lv, other) for lv in self._values]\n new_name = self.name\n npvalues = lib.maybe_convert_objects(new_values, try_float=False)\n same_dtype = isinstance(self.dtype, (StringDtype, CategoricalDtype))\n res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=same_dtype)\n return self._constructor(res_values, index=new_index, name=new_name, copy=False)", + "docstring": "Combine the Series with a Series or scalar according to . Combine the Series and using to perform elementwise selection for combined Series. is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the . func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:combine arg:self arg:other arg:func arg:fill_value arguments arg arg arg arg If Compare Assign Call If Call Assign Call Assign Call Assign Call Call With Call For Call Assign Call Assign Call Assign Call Assign Assign Call Call With Call Assign Call Assign Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "build_shuffle_then_shuffle", + "source_code": "def build_shuffle_then_shuffle(input_tensors, first_gather_devices, second_gather_devices, red_op, un_op=None):\n\n def upper_builder(tensors):\n return build_shuffle_all_reduce(tensors, second_gather_devices, red_op, un_op)\n\n def upper_level_f(tensors):\n return _reduce_non_singleton(tensors, upper_builder, un_op)\n return _build_shuffle_hybrid(input_tensors, first_gather_devices, red_op, upper_level_f)", + "docstring": "Construct hybrid of Shuffle within workers, Shuffle across workers.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:build_shuffle_then_shuffle arg:input_tensors arg:first_gather_devices arg:second_gather_devices arg:red_op arg:un_op arguments arg arg arg arg arg FunctionDef name:upper_builder arg:tensors arguments arg Return return:yes Call FunctionDef name:upper_level_f arg:tensors arguments arg Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_find_proxy", + "source_code": "def _find_proxy(*objects_to_search):\n proxy = None\n\n def find_proxy(x):\n nonlocal proxy\n if isinstance(x, Proxy):\n proxy = x\n map_aggregate(objects_to_search, find_proxy)\n return proxy", + "docstring": "Recursively search a data structure for a Proxy() and return it, return None if not found.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", + "ast_data": "FunctionDef name:_find_proxy arguments arg Assign FunctionDef name:find_proxy arg:x arguments arg If Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "render", + "source_code": "def render(self, context):\n pass", + "docstring": "Return the node rendered as a string.", + "type": "method", + "file_path": "django\\django\\template\\base.py", + "ast_data": "FunctionDef name:render arg:self arg:context arguments arg arg" + }, + { + "library": "pytorch", + "name": "print_disas", + "source_code": "def print_disas(self, *, file=None, stacklevel=0):\n tx = self.__get_tx(stacklevel)\n print(dis.Bytecode(tx.f_code, current_offset=tx.instructions[tx.instruction_pointer].offset).dis(), file=file)", + "docstring": "Print the current series of opcodes being executed (not including parent frames), including where you are in the particular opcode stream.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\comptime.py", + "ast_data": "FunctionDef name:print_disas arg:self arguments arg arg arg Assign Call Call Call Call" + }, + { + "library": "kornia", + "name": "transform_boxes_", + "source_code": "def transform_boxes_(self, M: torch.Tensor) -> Boxes:\n return self.transform_boxes(M, inplace=True)", + "docstring": "Inplace version of :func:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\boxes.py", + "ast_data": "FunctionDef name:transform_boxes_ arg:self arg:M arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": 
"LazyBatchNorm1d", + "source_code": "class LazyBatchNorm1d(_LazyNormBase, _BatchNorm):\n cls_to_become = BatchNorm1d\n\n def _check_input_dim(self, input):\n if input.dim() != 2 and input.dim() != 3:\n raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')", + "docstring": "A :class: module with lazy initialization. Lazy initialization based on the `BatchNorm1dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinrunning_meanrunning_var`", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\batchnorm.py", + "ast_data": "ClassDef name:LazyBatchNorm1d Assign FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If BoolOp Compare Call Compare Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "_partial_update", + "source_code": "def _partial_update(original: torch.Tensor, new: torch.Tensor, dim: int, n_chunks: int, idx: int, add: bool) -> torch.Tensor:\n chunks = list(original.chunk(n_chunks, dim=dim))\n assert chunks[idx].shape == new.shape, (original.shape, new.shape, idx)\n if add:\n chunks[idx] += new\n else:\n chunks[idx] = new\n return torch.cat(chunks, dim=dim)", + "docstring": "This API partially update a chunk of ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py", + "ast_data": "FunctionDef name:_partial_update arg:original arg:new arg:dim arg:n_chunks arg:idx arg:add arguments arg arg arg arg arg arg Assign Call Call Compare If Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_output_details", + "source_code": "def get_output_details(self):\n result = {}\n for output_name, tensor_index in self._outputs:\n result[output_name] = self._interpreter._get_tensor_details(tensor_index, self._subgraph_index)\n return result", + "docstring": "Gets output tensor details. Returns: A dictionary from input name to tensor details where each item is a dictionary with details about an output tensor. The dictionary contains the same fields as described for .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", + "ast_data": "FunctionDef name:get_output_details arg:self arguments arg Assign For Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_BesselK1eGrad", + "source_code": "@ops.RegisterGradient('BesselK1e')\ndef _BesselK1eGrad(op: ops.Operation, grad):\n x = op.inputs[0]\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n partial_x = y * (1.0 - math_ops.reciprocal(x)) - special_math_ops.bessel_k0e(x)\n return grad * partial_x", + "docstring": "Compute gradient of bessel_k1e(x) with respect to its argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_BesselK1eGrad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "get_count", + "source_code": "def get_count():\n _check_init()\n return _pypm.CountDevices()", + "docstring": "gets the number of devices. pygame.midi.get_count(): return num_devices Device ids range from 0 to get_count() -1", + "type": "function", + "file_path": "pygame\\src_py\\midi.py", + "ast_data": "FunctionDef name:get_count arguments Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "DatabaseError", + "source_code": "class DatabaseError(OSError):\n pass", + "docstring": "Error is raised when executing SQL with bad syntax or SQL that throws an error. 
Raised by :func: when a bad SQL statement is passed in. See Also -------- read_sql : Read SQL query or database table into a DataFrame. Examples -------- >>> from sqlite3 import connect >>> conn = connect(\":memory:\") >>> pd.read_sql(\"select * test\", conn) # doctest: +SKIP", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:DatabaseError" + }, + { + "library": "tensorflow", + "name": "_finish_log_prob_for_one_fiber", + "source_code": "def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims):\n x = self._maybe_rotate_dims(x, rotate_right=True)\n log_prob = self.distribution.log_prob(x)\n if self._is_maybe_event_override:\n log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)\n log_prob += math_ops.cast(ildj, log_prob.dtype)\n if self._is_maybe_event_override and isinstance(event_ndims, int):\n log_prob.set_shape(array_ops.broadcast_static_shape(y.get_shape().with_rank_at_least(1)[:-event_ndims], self.batch_shape))\n return log_prob", + "docstring": "Finish computation of log_prob on one element of the inverse image.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:_finish_log_prob_for_one_fiber arg:self arg:y arg:x arg:ildj arg:event_ndims arguments arg arg arg arg arg Assign Call Assign Call If Assign Call Call If BoolOp Call Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "margin_ranking_loss", + "source_code": "def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float=0, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n if has_torch_function_variadic(input1, input2, target):\n return handle_torch_function(margin_ranking_loss, (input1, input2, target), input1, input2, target, margin=margin, size_average=size_average, reduce=reduce, reduction=reduction)\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n if input1.dim() != input2.dim() or input1.dim() != target.dim():\n raise RuntimeError(f'margin_ranking_loss : All input tensors should have same dimension but got sizes: input1: {input1.size()}, input2: {input2.size()}, target: {target.size()} ')\n return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum)", + "docstring": "Compute the margin ranking loss. See :class: for details. Args: input1 (Tensor): Predicted values. input2 (Tensor): Predicted values. target (Tensor): Ground truth values. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. 
Returns: Tensor: Margin ranking loss.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:margin_ranking_loss arg:input1 arg:input2 arg:target arg:margin arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call If BoolOp Compare Call Call Compare Call Call Raise Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "reduce", + "source_code": "def reduce(self, initial_state, reduce_fn):\n iterator = iter(self)\n optional_data = iterator.get_next_as_optional()\n\n def cond(optional_data, state):\n del state\n return optional_data.has_value()\n\n def loop_body(optional_data, state):\n state = reduce_fn(state, optional_data.get_value())\n optional_data = iterator.get_next_as_optional()\n return (optional_data, state)\n optional_data, final_state = while_loop.while_loop(cond, loop_body, [optional_data, initial_state], parallel_iterations=1, return_same_structure=True)\n return final_state", + "docstring": "Execute a over all the elements of the input.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "FunctionDef name:reduce arg:self arg:initial_state arg:reduce_fn arguments arg arg arg Assign Call Assign Call FunctionDef name:cond arg:optional_data arg:state arguments arg arg Return return:yes Call FunctionDef name:loop_body arg:optional_data arg:state arguments arg arg Assign Call Call Assign Call Return return:yes Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "_pull_records", + "source_code": "def _pull_records(js: dict[str, Any], spec: list | str) -> list:\n result = _pull_field(js, spec, extract_record=True)\n if not isinstance(result, list):\n if pd.isnull(result):\n result = []\n else:\n raise TypeError(f'Path must contain list or null, but got {type(result).__name__} at {spec!r}')\n return result", + "docstring": "Internal function to pull field for records, and similar to _pull_field, but require to return list. 
And will raise error if has non iterable value.", + "type": "function", + "file_path": "pandas\\pandas\\io\\json\\_normalize.py", + "ast_data": "FunctionDef name:_pull_records arg:js arg:spec arguments arg arg Assign Call If Call If Call Assign Raise Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "compiled_fx_graph_hash", + "source_code": "def compiled_fx_graph_hash(gm: torch.fx.GraphModule, example_inputs: Sequence[InputType], fx_kwargs: _CompileFxKwargs, inputs_to_check: Sequence[int]) -> tuple[str, list[str]]:\n details = FxGraphHashDetails(gm, example_inputs, fx_kwargs, inputs_to_check)\n has_user_defined_triton_kernels = len(details.user_defined_triton_source) != 0\n pickler = FxGraphCachePickler(gm, has_user_defined_triton_kernels)\n key = 'f' + pickler.get_hash(details)\n debug_lines = pickler.debug_lines(details)\n debug_str = '\\n'.join(debug_lines)\n log.debug(f'FX graph cache hash details for key {key}:\\n{debug_str}')\n return (key, debug_lines)", + "docstring": "Generate a unique hash of the FX graph for caching.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\codecache.py", + "ast_data": "FunctionDef name:compiled_fx_graph_hash arg:gm arg:example_inputs arg:fx_kwargs arg:inputs_to_check arguments arg arg arg arg Assign Call Assign Compare Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "ELU", + "source_code": "class ELU(Module):\n __constants__ = ['alpha', 'inplace']\n alpha: float\n inplace: bool\n\n def __init__(self, alpha: float=1.0, inplace: bool=False) -> None:\n super().__init__()\n self.alpha = alpha\n self.inplace = inplace\n\n def forward(self, input: Tensor) -> Tensor:\n return F.elu(input, self.alpha, self.inplace)\n\n def extra_repr(self) -> str:\n inplace_str = ', inplace=True' if self.inplace else ''\n return f'alpha={self.alpha}{inplace_str}'", + "docstring": "Applies the Exponential Linear Unit (ELU) function, element-wise. Method described in the paper: __. ELU is defined as: .. math:: \\text{ELU}(x) = \\begin{cases} x, & \\text{ if } x > 0\\\\ \\alpha * (\\exp(x) - 1), & \\text{ if } x \\leq 0 \\end{cases} Args: alpha: the :math: value for the ELU formulation. Default: 1.0 inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input. .. 
image:: ../scripts/activation_images/ELU.png Examples:: >>> m = nn.ELU() >>> input = torch.randn(2) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:ELU Assign FunctionDef name:__init__ arg:self arg:alpha arg:inplace arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get_outputs_meta", + "source_code": "def get_outputs_meta(self) -> tuple[torch.Tensor, ...]:\n assert self._outputs_meta is not None, 'Attempted to get_outputs_meta() without configuring output meta'\n return self._outputs_meta", + "docstring": "Get the output metadata (meta tensors) reprensenting the outputs of this stage", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", + "ast_data": "FunctionDef name:get_outputs_meta arg:self arguments arg Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "parents", + "source_code": "@property\ndef parents(self):\n return [self.categorical_column]", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_nested_row_lengths", + "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_nested_row_lengths(cls, flat_values, nested_row_lengths, name=None, validate=True):\n if not isinstance(validate, bool):\n raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n if isinstance(nested_row_lengths, tensor_lib.Tensor):\n raise TypeError(f'Argument `nested_row_lengths` must be a list of Tensors. Received {nested_row_lengths}.')\n with ops.name_scope(name, 'RaggedFromNestedRowlengths', [flat_values] + list(nested_row_lengths)):\n result = flat_values\n for lengths in reversed(nested_row_lengths):\n result = cls.from_row_lengths(result, lengths, validate=validate)\n return result", + "docstring": "Creates a from a nested list of tensors. Equivalent to: Args: flat_values: A potentially ragged tensor. nested_row_lengths: A list of 1-D integer tensors. The th tensor is used as the for the th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. 
Returns: A (or if is empty).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:from_nested_row_lengths arg:cls arg:flat_values arg:nested_row_lengths arg:name arg:validate arguments arg arg arg arg arg If Call Raise Call If Call Raise Call With Call Call Assign For Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_AxesStack", + "source_code": "class _AxesStack:\n\n def __init__(self):\n self._axes = {}\n self._counter = itertools.count()\n\n def as_list(self):\n return [*self._axes]\n\n def remove(self, a):\n self._axes.pop(a)\n\n def bubble(self, a):\n if a not in self._axes:\n raise ValueError('Axes has not been added yet')\n self._axes[a] = next(self._counter)\n\n def add(self, a):\n if a not in self._axes:\n self._axes[a] = next(self._counter)\n\n def current(self):\n return max(self._axes, key=self._axes.__getitem__, default=None)\n\n def __getstate__(self):\n return {**vars(self), '_counter': max(self._axes.values(), default=0)}\n\n def __setstate__(self, state):\n next_counter = state.pop('_counter')\n vars(self).update(state)\n self._counter = itertools.count(next_counter)", + "docstring": "Helper class to track Axes in a figure. Axes are tracked both in the order in which they have been added (`` dict).", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "ClassDef name:_AxesStack FunctionDef name:__init__ arg:self arguments arg Assign Assign Call FunctionDef name:as_list arg:self arguments arg Return return:yes FunctionDef name:remove arg:self arg:a arguments arg arg Call FunctionDef name:bubble arg:self arg:a arguments arg arg If Compare Raise Call Assign Call FunctionDef name:add arg:self arg:a arguments arg arg If Compare Assign Call FunctionDef name:current arg:self arguments arg Return return:yes Call FunctionDef name:__getstate__ arg:self arguments arg Return return:yes Call Call Call FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Assign Call Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "next_iter", + "source_code": "def next_iter(self):\n self._iter += 1\n self.handles_post_forward_order.clear()\n if self._checking_order:\n self.current_order_index = 0\n if self.warn_status == _ExecOrderWarnStatus.WARNING:\n self.warn_status = _ExecOrderWarnStatus.WARNED", + "docstring": "Advances the internal data structures per iteration. 
This should be called in the post-backward callback since that marks the true end of an iteration.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py", + "ast_data": "FunctionDef name:next_iter arg:self arguments arg Call If Assign If Compare Assign" + }, + { + "library": "pytorch", + "name": "serialize", + "source_code": "def serialize(self, obj):\n f = io.BytesIO()\n p = _pickler(f)\n p.dispatch_table = self._dispatch_table\n p.dispatch_table[dist.rpc.PyRRef] = self._py_rref_reducer\n p.dispatch_table[dist.rpc.RRef] = self._rref_reducer\n if isinstance(obj, torch.jit.ScriptModule):\n p.dispatch_table[obj.__class__] = self._script_module_reducer\n for class_name in self._class_reducer_dict.keys():\n p.dispatch_table[class_name] = self._class_reducer_dict[class_name]\n global _thread_local_tensor_tables\n if hasattr(_thread_local_tensor_tables, 'send_tables'):\n old_send_tables = _thread_local_tensor_tables.send_tables\n else:\n old_send_tables = None\n _thread_local_tensor_tables.send_tables = []\n p.dump(obj)\n tensors = _thread_local_tensor_tables.send_tables\n if old_send_tables is not None:\n _thread_local_tensor_tables.send_tables = old_send_tables\n else:\n del _thread_local_tensor_tables.send_tables\n return (f.getvalue(), tensors)", + "docstring": "Serialize non tensor data into binary string, tensor data into tensor table", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\rpc\\internal.py", + "ast_data": "FunctionDef name:serialize arg:self arg:obj arguments arg arg Assign Call Assign Call Assign Assign Assign If Call Assign For Call Assign If Call Assign Assign Assign Call Assign If Compare Assign Return return:yes Call" + }, + { + "library": "numpy", + "name": "_remove_nan_1d", + "source_code": "def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False):\n if arr1d.dtype == object:\n c = np.not_equal(arr1d, arr1d, dtype=bool)\n else:\n c = np.isnan(arr1d)\n s = np.nonzero(c)[0]\n if s.size == arr1d.size:\n warnings.warn('All-NaN slice encountered', RuntimeWarning, stacklevel=6)\n if second_arr1d is None:\n return (arr1d[:0], None, True)\n else:\n return (arr1d[:0], second_arr1d[:0], True)\n elif s.size == 0:\n return (arr1d, second_arr1d, overwrite_input)\n else:\n if not overwrite_input:\n arr1d = arr1d.copy()\n enonan = arr1d[-s.size:][~c[-s.size:]]\n arr1d[s[:enonan.size]] = enonan\n if second_arr1d is None:\n return (arr1d[:-s.size], None, True)\n else:\n if not overwrite_input:\n second_arr1d = second_arr1d.copy()\n enonan = second_arr1d[-s.size:][~c[-s.size:]]\n second_arr1d[s[:enonan.size]] = enonan\n return (arr1d[:-s.size], second_arr1d[:-s.size], True)", + "docstring": "Equivalent to arr1d[~arr1d.isnan()], but in a different order Presumably faster as it incurs fewer copies Parameters ---------- arr1d : ndarray Array to remove nans from second_arr1d : ndarray or None A second array which will have the same positions removed as arr1d. overwrite_input : bool True if can be modified in place Returns ------- res : ndarray Array with nan elements removed second_res : ndarray or None Second array with nan element positions of first array removed. 
overwrite_input : bool True if can be modified in place, given the constraint on the input", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py", + "ast_data": "FunctionDef name:_remove_nan_1d arg:arr1d arg:second_arr1d arg:overwrite_input arguments arg arg arg If Compare Assign Call Assign Call Assign Call If Compare Call If Compare Return return:yes Return return:yes If Compare Return return:yes If Assign Call Assign Assign If Compare Return return:yes If Assign Call Assign Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_append_legend_handles_labels", + "source_code": "@final\ndef _append_legend_handles_labels(self, handle: Artist, label: str) -> None:\n self.legend_handles.append(handle)\n self.legend_labels.append(label)", + "docstring": "Append current handle and label to ``. These will be used to make the legend.", + "type": "method", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py", + "ast_data": "FunctionDef name:_append_legend_handles_labels arg:self arg:handle arg:label arguments arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "_get_context_id", + "source_code": "def _get_context_id(self, context):\n if context in self._context_to_id:\n return self._context_to_id[context]\n graph_is_new = False\n with self._context_lock:\n if context not in self._context_to_id:\n graph_is_new = True\n context_id = _get_id()\n self._context_to_id[context] = context_id\n if graph_is_new:\n self.get_writer().WriteDebuggedGraph(debug_event_pb2.DebuggedGraph(graph_id=context_id, graph_name=getattr(context, 'name', None), outer_context_id=self._get_outer_context_id(context)))\n return self._context_to_id[context]", + "docstring": "Get a unique ID for an op-construction context (e.g., a graph). If the graph has been encountered before, reuse the same unique ID. When encountering a new context (graph), this methods writes a DebugEvent proto with the debugged_graph field to the proper DebugEvent file. Args: context: A context to get the unique ID for. Must be hashable. E.g., a Graph object. 
Returns: A unique ID for the context.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py", + "ast_data": "FunctionDef name:_get_context_id arg:self arg:context arguments arg arg If Compare Return return:yes Assign With If Compare Assign Assign Call Assign If Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self):\n self._number_of_shards = None", + "docstring": "Creates a new TpuContext.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_function.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign" + }, + { + "library": "matplotlib", + "name": "get_fontsize", + "source_code": "def get_fontsize(self):\n return self.prop.get_size_in_points()", + "docstring": "Return the fontsize in points.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:get_fontsize arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "render", + "source_code": "def render(self, name, value, attrs=None, renderer=None):\n context = self.get_context(name, value, attrs)\n return self._render(self.template_name, context, renderer)", + "docstring": "Render the widget as an HTML string.", + "type": "method", + "file_path": "django\\django\\forms\\widgets.py", + "ast_data": "FunctionDef name:render arg:self arg:name arg:value arg:attrs arg:renderer arguments arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "style", + "source_code": "@property\ndef style(self) -> Styler:\n has_jinja2 = import_optional_dependency('jinja2', errors='ignore')\n if not has_jinja2:\n raise AttributeError(\"The '.style' accessor requires jinja2\")\n from pandas.io.formats.style import Styler\n return Styler(self)", + "docstring": "Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2, 3]}) >>> df.style # doctest: +SKIP Please see _ for more examples.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:style arg:self arguments arg Assign Call If Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "line_collection_2d_to_3d", + "source_code": "def line_collection_2d_to_3d(col, zs=0, zdir='z', axlim_clip=False):\n segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir)\n col.__class__ = Line3DCollection\n col.set_segments(segments3d)\n col._axlim_clip = axlim_clip", + "docstring": "Convert a to a object.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:line_collection_2d_to_3d arg:col arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Call Call Assign Call Assign" + }, + { + "library": "sphinx", + "name": "handle_signature", + "source_code": "def handle_signature(self, sig: str, signode: desc_signature) -> ObjDescT:\n raise ValueError", + "docstring": "Parse the signature *sig*. The individual nodes are then appended to *signode*. If ValueError is raised, parsing is aborted and the whole *sig* is put into a single desc_name node. The return value should be a value that identifies the object. 
It is passed to :meth: unchanged, and otherwise only used to skip duplicates.", + "type": "method", + "file_path": "sphinx\\sphinx\\directives\\__init__.py", + "ast_data": "FunctionDef name:handle_signature arg:self arg:sig arg:signode arguments arg arg arg Raise" + }, + { + "library": "tensorflow", + "name": "_backward_function_wrapper", + "source_code": "def _backward_function_wrapper(*args):\n if not backward.outputs:\n return backward.structured_outputs\n processed_args = []\n input_index = 0\n for output_index, arg in enumerate(args):\n if isinstance(arg, indexed_slices.IndexedSlices):\n arg = ops.convert_to_tensor(arg)\n if output_index in skip_positions:\n continue\n if arg is None:\n input_placeholder = backward.inputs[input_index]\n if input_placeholder.dtype == dtypes.variant:\n arg = variant_zeros_like[output_index]\n else:\n arg = array_ops.zeros(*default_gradient.shape_and_dtype(input_placeholder))\n processed_args.append(arg)\n input_index += 1\n if input_index >= backward_function_inputs:\n break\n return backward._call_flat(processed_args, remapped_captures)", + "docstring": "Process output gradients and call the backward function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:_backward_function_wrapper arguments arg If Return return:yes Assign Assign For Call If Call Assign Call If Compare If Compare Assign If Compare Assign Assign Call Call Call If Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_tf_data_packed_nest_with_indices", + "source_code": "def _tf_data_packed_nest_with_indices(structure, flat, index):\n packed = []\n for s in _tf_data_yield_value(structure):\n if _tf_data_is_nested(s):\n new_index, child = _tf_data_packed_nest_with_indices(s, flat, index)\n packed.append(sequence_like(s, child))\n index = new_index\n else:\n packed.append(flat[index])\n index += 1\n return (index, packed)", + "docstring": "Helper function for pack_nest_as. Args: structure: Substructure (tuple of elements and/or tuples) to mimic flat: Flattened values to output substructure for. index: Index at which to start reading from flat. Returns: The tuple (new_index, child), where: * new_index - the updated index into having processed . * packed - the subset of corresponding to , having started at , and packed into the same nested format. Raises: ValueError: if contains more elements than (assuming indexing starts from ).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py", + "ast_data": "FunctionDef name:_tf_data_packed_nest_with_indices arg:structure arg:flat arg:index arguments arg arg arg Assign For Call If Call Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "KerasModelTypeCombination", + "source_code": "class KerasModelTypeCombination(test_combinations.TestCombination):\n\n def context_managers(self, kwargs):\n model_type = kwargs.pop('model_type', None)\n if model_type in KERAS_MODEL_TYPES:\n return [testing_utils.model_type_scope(model_type)]\n else:\n return []\n\n def parameter_modifiers(self):\n return [test_combinations.OptionalParameter('model_type')]", + "docstring": "Combination for Keras model types when doing model test. It by default includes 'functional', 'subclass', 'sequential'. Various methods in to get models will auto-generate a model of the currently active Keras model type. 
This allows unittests to confirm the equivalence between different Keras models.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\combinations.py", + "ast_data": "ClassDef name:KerasModelTypeCombination FunctionDef name:context_managers arg:self arg:kwargs arguments arg arg Assign Call If Compare Return return:yes Call Return return:no FunctionDef name:parameter_modifiers arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_transform_input_tensor", + "source_code": "def _transform_input_tensor(self, input_tensor, state_manager=None):\n if self.dtype.is_integer != input_tensor.dtype.is_integer:\n raise ValueError('Column dtype and SparseTensors dtype must be compatible. key: {}, column dtype: {}, tensor dtype: {}'.format(self.key, self.dtype, input_tensor.dtype))\n fc_utils.assert_string_or_int(input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key))\n key_dtype = self.dtype\n if input_tensor.dtype.is_integer:\n key_dtype = dtypes.int64\n input_tensor = math_ops.cast(input_tensor, dtypes.int64)\n name = '{}_lookup'.format(self.key)\n if state_manager is None or not state_manager.has_resource(self, name):\n with ops.init_scope():\n table = lookup_ops.index_table_from_tensor(vocabulary_list=tuple(self.vocabulary_list), default_value=self.default_value, num_oov_buckets=self.num_oov_buckets, dtype=key_dtype, name=name)\n if state_manager is not None:\n state_manager.add_resource(self, name, table)\n else:\n table = state_manager.get_resource(self, name)\n return table.lookup(input_tensor)", + "docstring": "Creates a lookup table for the vocabulary list.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:_transform_input_tensor arg:self arg:input_tensor arg:state_manager arguments arg arg arg If Compare Raise Call Call Call Call Assign If Assign Assign Call Assign Call If BoolOp Compare Call With Call Assign Call Call If Compare Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_elementwise_where_v2", + "source_code": "def _elementwise_where_v2(condition, x, y):\n if not (condition.shape.is_fully_defined() and x.shape.is_fully_defined() and y.shape.is_fully_defined() and (x.shape == y.shape) and (condition.shape == x.shape)):\n shape_c = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(condition)\n shape_x = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x)\n shape_y = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y)\n shape = ragged_tensor_shape.broadcast_dynamic_shape(shape_c, ragged_tensor_shape.broadcast_dynamic_shape(shape_x, shape_y))\n condition = ragged_tensor_shape.broadcast_to(condition, shape)\n x = ragged_tensor_shape.broadcast_to(x, shape)\n y = ragged_tensor_shape.broadcast_to(y, shape)\n condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)\n x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)\n y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)\n if not (condition_is_ragged or x_is_ragged or y_is_ragged):\n return array_ops.where_v2(condition, x, y)\n return ragged_functional_ops.map_flat_values(array_ops.where_v2, condition, x, y)", + "docstring": "Ragged version of tf.where_v2(condition, x, y).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_where_op.py", + "ast_data": "FunctionDef name:_elementwise_where_v2 arg:condition arg:x arg:y arguments arg arg arg If BoolOp 
Call Call Call Compare Compare Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If BoolOp Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "model_unpickle", + "source_code": "def model_unpickle(model_id):\n if isinstance(model_id, tuple):\n model = apps.get_model(*model_id)\n else:\n model = model_id\n return model.__new__(model)", + "docstring": "Used to unpickle Model subclasses with deferred fields.", + "type": "function", + "file_path": "django\\django\\db\\models\\base.py", + "ast_data": "FunctionDef name:model_unpickle arg:model_id arguments arg If Call Assign Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "normalize", + "source_code": "def normalize(self) -> 'MemoryDep':\n return MemoryDep(self.name, *_RecordLoadStoreInner._normalize(self.index, self.ranges), self.mode)", + "docstring": "Normalize by merging loops. The different to normalize_with_stride_order is, this method does not reorder loops while normalize_with_stride_order reorder loops based on stride order.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\dependencies.py", + "ast_data": "FunctionDef name:normalize arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "PyrDown", + "source_code": "class PyrDown(Module):\n\n def __init__(self, border_type: str='reflect', align_corners: bool=False, factor: float=2.0) -> None:\n super().__init__()\n self.border_type: str = border_type\n self.align_corners: bool = align_corners\n self.factor: float = factor\n\n def forward(self, input: Tensor) -> Tensor:\n return pyrdown(input, self.border_type, self.align_corners, self.factor)", + "docstring": "Blur a tensor and downsamples it. Args: border_type: the padding mode to be applied before convolving. 
The expected modes are: `(B, C, H, W)(B, C, H / 2, W / 2)` Examples: >>> input = torch.rand(1, 2, 4, 4) >>> output = PyrDown()(input) # 1x2x2x2", + "type": "class", + "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py", + "ast_data": "ClassDef name:PyrDown FunctionDef name:__init__ arg:self arg:border_type arg:align_corners arg:factor arguments arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_export_to_saved_model_graph", + "source_code": "def _export_to_saved_model_graph(self, object_map=None, tensor_map=None, options=None, **kwargs):\n new_variable = None\n if options.experimental_variable_policy._save_variable_devices():\n with ops.device(self.device):\n new_variable = copy_to_graph_uninitialized(self)\n else:\n new_variable = copy_to_graph_uninitialized(self)\n object_map[self] = new_variable\n tensor_map[self.handle] = new_variable.handle\n return [self.handle]", + "docstring": "For implementing .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:_export_to_saved_model_graph arg:self arg:object_map arg:tensor_map arg:options arguments arg arg arg arg arg Assign If Call With Call Assign Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "numpy", + "name": "ComplexFloatingFormat", + "source_code": "class ComplexFloatingFormat:\n\n def __init__(self, x, precision, floatmode, suppress_small, sign=False, *, legacy=None):\n if isinstance(sign, bool):\n sign = '+' if sign else '-'\n floatmode_real = floatmode_imag = floatmode\n if legacy <= 113:\n floatmode_real = 'maxprec_equal'\n floatmode_imag = 'maxprec'\n self.real_format = FloatingFormat(x.real, precision, floatmode_real, suppress_small, sign=sign, legacy=legacy)\n self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag, suppress_small, sign='+', legacy=legacy)\n\n def __call__(self, x):\n r = self.real_format(x.real)\n i = self.imag_format(x.imag)\n sp = len(i.rstrip())\n i = i[:sp] + 'j' + i[sp:]\n return r + i", + "docstring": "Formatter for subtypes of np.complexfloating", + "type": "class", + "file_path": "numpy\\numpy\\_core\\arrayprint.py", + "ast_data": "ClassDef name:ComplexFloatingFormat FunctionDef name:__init__ arg:self arg:x arg:precision arg:floatmode arg:suppress_small arg:sign arguments arg arg arg arg arg arg arg If Call Assign Assign If Compare Assign Assign Assign Call Assign Call FunctionDef name:__call__ arg:self arg:x arguments arg arg Assign Call Assign Call Assign Call Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "unpack_tuple_and_ellipses", + "source_code": "def unpack_tuple_and_ellipses(item: tuple):\n if len(item) > 1:\n if item[0] is Ellipsis:\n item = item[1:]\n elif item[-1] is Ellipsis:\n item = item[:-1]\n if len(item) > 1:\n raise IndexError('too many indices for array.')\n item = item[0]\n return item", + "docstring": "Possibly unpack arr[..., n] to arr[n]", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexers\\utils.py", + "ast_data": "FunctionDef name:unpack_tuple_and_ellipses arg:item arguments arg If Compare Call If Compare Assign If Compare Assign If Compare Call Raise Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_foreground", + "source_code": "def set_foreground(self, fg, isRGBA=False):\n if self._forced_alpha and isRGBA:\n self._rgb = fg[:3] + (self._alpha,)\n elif 
self._forced_alpha:\n self._rgb = colors.to_rgba(fg, self._alpha)\n elif isRGBA:\n self._rgb = fg\n else:\n self._rgb = colors.to_rgba(fg)", + "docstring": "Set the foreground color. Parameters ---------- fg : :mpltype: isRGBA : bool If *fg* is known to be an `` tuple, *isRGBA* can be set to True to improve performance.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:set_foreground arg:self arg:fg arg:isRGBA arguments arg arg arg If BoolOp Assign If Assign Call If Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "_crossed_column", + "source_code": "def _crossed_column(keys, hash_bucket_size, hash_key=None):\n if not hash_bucket_size or hash_bucket_size < 1:\n raise ValueError('hash_bucket_size must be > 1. hash_bucket_size: {}'.format(hash_bucket_size))\n if not keys or len(keys) < 2:\n raise ValueError('keys must be a list with length > 1. Given: {}'.format(keys))\n for key in keys:\n if not isinstance(key, six.string_types) and (not isinstance(key, _CategoricalColumn)):\n raise ValueError('Unsupported key type. All keys must be either string, or categorical column except _HashedCategoricalColumn. Given: {}'.format(key))\n if isinstance(key, _HashedCategoricalColumn):\n raise ValueError('categorical_column_with_hash_bucket is not supported for crossing. Hashing before crossing will increase probability of collision. Instead, use the feature name as a string. Given: {}'.format(key))\n return _CrossedColumn(keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)", + "docstring": "Returns a column for performing crosses of categorical features. Crossed features are hashed according to . Conceptually, the transformation can be thought of as: Hash(cartesian product of features) % For example, if the input features are: * SparseTensor referred by first key: * SparseTensor referred by second key: then crossed feature will look like: Here is an example to create a linear model with crosses of string features: You could also use vocabulary lookup before crossing: If an input feature is of numeric type, you can use , or , as in the example: To use crossed column in DNN model, you need to add it in an embedding column as in this example: Args: keys: An iterable identifying the features to be crossed. Each element can be either: * string: Uses the corresponding feature which must be of string type. * : Uses the transformed tensor produced by this column. Does not support hashed categorical column. hash_bucket_size: An int > 1. The number of buckets. hash_key: Specify the hash_key that will be used by the function to combine the crosses fingerprints on SparseCrossOp (optional). Returns: A . Raises: ValueError: If . ValueError: If any of the keys is neither a string nor . ValueError: If any of the keys is . 
ValueError: If .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_crossed_column arg:keys arg:hash_bucket_size arg:hash_key arguments arg arg arg If BoolOp Compare Raise Call Call If BoolOp Compare Call Raise Call Call For If BoolOp Call Call Raise Call Call If Call Raise Call Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_should_fallback_to_positional", + "source_code": "@cache_readonly\ndef _should_fallback_to_positional(self) -> bool:\n return False", + "docstring": "Should an integer key be treated as positional?", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\range.py", + "ast_data": "FunctionDef name:_should_fallback_to_positional arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "inverse", + "source_code": "@classmethod\ndef inverse(cls, input: Tensor, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n if extra_args is None:\n extra_args = {}\n if isinstance(module, (K.GeometricAugmentationBase2D,)):\n if module.transform_matrix is None:\n raise ValueError(f'No valid transformation matrix found in {module.__class__}.')\n transform = module.compute_inverse_transformation(module.transform_matrix)\n input = module.inverse_masks(input, params=cls.get_instance_module_param(param), flags=module.flags, transform=transform, **extra_args)\n elif isinstance(module, (K.GeometricAugmentationBase3D,)):\n raise NotImplementedError('The support for 3d mask operations are not yet supported. You are welcome to file a PR in our repo.')\n elif isinstance(module, K.container.ImageSequentialBase):\n input = module.inverse_masks(input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, (K.auto.operations.OperationBase,)):\n input = MaskSequentialOps.inverse(input, module=module.op, param=param, extra_args=extra_args)\n return input", + "docstring": "Inverse a transformation with respect to the parameters. Args: input: the input tensor. module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. 
extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\ops.py", + "ast_data": "FunctionDef name:inverse arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Compare Assign If Call If Compare Raise Call Assign Call Assign Call Call If Call Raise Call If Call Assign Call Call If Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "_hook", + "source_code": "def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type:\n typ, _, api = ctx\n name = typ.name.split('.')[-1]\n name_new = _PRECISION_DICT[f'{_MODULE}._nbit.{name}']\n return cast('TypeAnalyser', api).named_type(name_new)", + "docstring": "Replace a type-alias with a concrete `` subclass.", + "type": "function", + "file_path": "numpy\\numpy\\typing\\mypy_plugin.py", + "ast_data": "FunctionDef name:_hook arg:ctx arguments arg Assign Assign Call Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "UserDefinedListVariable", + "source_code": "class UserDefinedListVariable(UserDefinedObjectVariable):\n _nonvar_fields = UserDefinedObjectVariable._nonvar_fields\n\n def __init__(self, value, list_vt=None, **kwargs):\n super().__init__(value, **kwargs)\n self._list_vt = list_vt\n if self._list_vt is None:\n assert self.source is None, 'list_vt must be constructed by builder.py when source is present'\n self._list_vt = variables.ListVariable([], mutation_type=ValueMutationNew())\n\n def call_method(self, tx, name, args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n assert self._list_vt is not None\n method = self._maybe_get_baseclass_method(name)\n if method in list_methods:\n return self._list_vt.call_method(tx, name, args, kwargs)\n return super().call_method(tx, name, args, kwargs)\n\n def unpack_var_sequence(self, tx):\n assert self._list_vt is not None\n if type(self.value).__iter__ is list.__iter__:\n return self._list_vt.unpack_var_sequence(tx)\n raise NotImplementedError\n\n def is_underlying_vt_modified(self, side_effects):\n return side_effects.is_modified(self._list_vt)", + "docstring": "Represents user defined objects that are subclasses of lists. Internally, it uses a ListVariable to represent the list part of the variable tracker. For everything else, it falls back to UserDefinedObjectVariable.", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\user_defined.py", + "ast_data": "ClassDef name:UserDefinedListVariable Assign FunctionDef name:__init__ arg:self arg:value arg:list_vt arguments arg arg arg arg Call Call Assign If Compare Compare Assign Call Call FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg Compare Assign Call If Compare Return return:yes Call Return return:yes Call Call FunctionDef name:unpack_var_sequence arg:self arg:tx arguments arg arg Compare If Compare Call Return return:yes Call Raise FunctionDef name:is_underlying_vt_modified arg:self arg:side_effects arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "is_cupy_namespace", + "source_code": "@lru_cache(100)\ndef is_cupy_namespace(xp: Namespace) -> bool:\n return xp.__name__ in {'cupy', _compat_module_name() + '.cupy'}", + "docstring": "Returns True if is a CuPy namespace. This includes both CuPy itself and the version wrapped by array-api-compat. 
See Also -------- array_namespace is_numpy_namespace is_torch_namespace is_ndonnx_namespace is_dask_namespace is_jax_namespace is_pydata_sparse_namespace is_array_api_strict_namespace", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", + "ast_data": "FunctionDef name:is_cupy_namespace arg:xp arguments arg Return return:yes Compare Call Call" + }, + { + "library": "scikit-learn", + "name": "_tolerance", + "source_code": "def _tolerance(X, tol):\n if tol == 0:\n return 0\n if sp.issparse(X):\n variances = mean_variance_axis(X, axis=0)[1]\n else:\n variances = np.var(X, axis=0)\n return np.mean(variances) * tol", + "docstring": "Return a tolerance which is dependent on the dataset.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py", + "ast_data": "FunctionDef name:_tolerance arg:X arg:tol arguments arg arg If Compare Return return:yes If Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "sharded_type_as", + "source_code": "def sharded_type_as(args, kwargs, pg):\n st = args[0]\n tensor = args[1]\n if isinstance(tensor, ShardedTensor):\n tensor = tensor.local_tensor()\n new_local_shards = [Shard(shard.tensor.type_as(tensor), shard.metadata) for shard in st.local_shards()]\n st_meta = copy.deepcopy(st._metadata)\n st_meta.tensor_properties.dtype = tensor.dtype\n return (new_local_shards, st_meta)", + "docstring": "Handles ``. Return: new_local_shards (List[Shard]): Local shards for the new sharded tensor. st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\tensor_ops.py", + "ast_data": "FunctionDef name:sharded_type_as arg:args arg:kwargs arg:pg arguments arg arg arg Assign Assign If Call Assign Call Assign Call Call Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_path_from_root", + "source_code": "@property\ndef _path_from_root(self) -> Generator[CUDAGraphNode, None, None]:\n nodes = reversed(list(self._path_to_root))\n yield from nodes", + "docstring": "Returns all nodes in the path starting at the root and ending at self", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:_path_from_root arg:self arguments arg Assign Call Call" + }, + { + "library": "pandas", + "name": "NotThisMethod", + "source_code": "class NotThisMethod(Exception):\n pass", + "docstring": "Exception raised if a method is not valid for the current scenario.", + "type": "class", + "file_path": "pandas\\pandas\\_version.py", + "ast_data": "ClassDef name:NotThisMethod" + }, + { + "library": "pandas", + "name": "_prepare_categoricals", + "source_code": "def _prepare_categoricals(self, data: DataFrame) -> DataFrame:\n is_cat = [isinstance(dtype, CategoricalDtype) for dtype in data.dtypes]\n if not any(is_cat):\n return data\n self._has_value_labels |= np.array(is_cat)\n get_base_missing_value = StataMissingValue.get_base_missing_value\n data_formatted = []\n for col, col_is_cat in zip(data, is_cat):\n if col_is_cat:\n svl = StataValueLabel(data[col], encoding=self._encoding)\n self._value_labels.append(svl)\n dtype = data[col].cat.codes.dtype\n if dtype == np.int64:\n raise ValueError('It is not possible to export int64-based categorical data to Stata.')\n values = data[col].cat.codes._values.copy()\n if values.max() >= get_base_missing_value(dtype):\n if dtype == 
np.int8:\n dtype = np.dtype(np.int16)\n elif dtype == np.int16:\n dtype = np.dtype(np.int32)\n else:\n dtype = np.dtype(np.float64)\n values = np.array(values, dtype=dtype)\n values[values == -1] = get_base_missing_value(dtype)\n data_formatted.append((col, values))\n else:\n data_formatted.append((col, data[col]))\n return DataFrame.from_dict(dict(data_formatted))", + "docstring": "Check for categorical columns, retain categorical information for Stata file and convert categorical data to int", + "type": "method", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:_prepare_categoricals arg:self arg:data arguments arg arg Assign Call If Call Return return:yes Call Assign Assign For Call If Assign Call Call Assign If Compare Raise Call Assign Call If Compare Call Call If Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Compare Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_has_precomputed_nvals", + "source_code": "def _has_precomputed_nvals(self):\n return self._nvals is not None", + "docstring": "Returns true if has already been computed. If true, then will return its value without calling any TensorFlow ops.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:_has_precomputed_nvals arg:self arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "__getitem__", + "source_code": "def __getitem__(self, key):\n if key is None:\n key = self._key()\n value = self._get_recursive(key)\n if value is None:\n value = self[key] = self.default_factory()\n return value", + "docstring": "Gets the value at key (or current context), or sets default value. Args: key: May be or object. When , the key is set to the current context. Returns: Either the cached or default value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Compare Assign Call Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "get_group", + "source_code": "@final\ndef get_group(self, name) -> DataFrame | Series:\n keys = self.keys\n level = self.level\n if is_list_like(level) and len(level) == 1 or (is_list_like(keys) and len(keys) == 1):\n if isinstance(name, tuple) and len(name) == 1:\n name = name[0]\n else:\n raise KeyError(name)\n inds = self._get_index(name)\n if not len(inds):\n raise KeyError(name)\n return self._selected_obj.iloc[inds]", + "docstring": "Construct DataFrame from group with provided name. Parameters ---------- name : object The name of the group to get as a DataFrame. Returns ------- Series or DataFrame Get the respective Series or DataFrame corresponding to the group provided. See Also -------- DataFrameGroupBy.groups: Dictionary representation of the groupings formed during a groupby operation. DataFrameGroupBy.indices: Provides a mapping of group rows to positions of the elements. SeriesGroupBy.groups: Dictionary representation of the groupings formed during a groupby operation. SeriesGroupBy.indices: Provides a mapping of group rows to positions of the elements. 
Examples -------- For SeriesGroupBy: >>> lst = [\"a\", \"a\", \"b\"] >>> ser = pd.Series([1, 2, 3], index=lst) >>> ser a 1 a 2 b 3 dtype: int64 >>> ser.groupby(level=0).get_group(\"a\") a 1 a 2 dtype: int64 For DataFrameGroupBy: >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] >>> df = pd.DataFrame( ... data, columns=[\"a\", \"b\", \"c\"], index=[\"owl\", \"toucan\", \"eagle\"] ... ) >>> df a b c owl 1 2 3 toucan 1 5 6 eagle 7 8 9 >>> df.groupby(by=[\"a\"]).get_group((1,)) a b c owl 1 2 3 toucan 1 5 6 For Resampler: >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... [\"2023-01-01\", \"2023-01-15\", \"2023-02-01\", \"2023-02-15\"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.resample(\"MS\").get_group(\"2023-01-01\") 2023-01-01 1 2023-01-15 2 dtype: int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:get_group arg:self arg:name arguments arg arg Assign Assign If BoolOp BoolOp Call Compare Call BoolOp Call Compare Call If BoolOp Call Compare Call Assign Raise Call Assign Call If Call Raise Call Return return:yes" + }, + { + "library": "pandas", + "name": "delete", + "source_code": "def delete(self, loc) -> list[Block]:\n if not is_list_like(loc):\n loc = [loc]\n if self.ndim == 1:\n values = cast(np.ndarray, self.values)\n values = np.delete(values, loc)\n mgr_locs = self._mgr_locs.delete(loc)\n return [type(self)(values, placement=mgr_locs, ndim=self.ndim)]\n if np.max(loc) >= self.values.shape[0]:\n raise IndexError\n loc = np.concatenate([loc, [self.values.shape[0]]])\n mgr_locs_arr = self._mgr_locs.as_array\n new_blocks: list[Block] = []\n previous_loc = -1\n refs = self.refs if self.refs.has_reference() else None\n for idx in loc:\n if idx == previous_loc + 1:\n pass\n else:\n values = self.values[previous_loc + 1:idx, :]\n locs = mgr_locs_arr[previous_loc + 1:idx]\n nb = type(self)(values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs)\n new_blocks.append(nb)\n previous_loc = idx\n return new_blocks", + "docstring": "Deletes the locs from the block. We split the block to avoid copying the underlying data. We create new blocks for every connected segment of the initial block that is not deleted. The new blocks point to the initial array.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:delete arg:self arg:loc arguments arg arg If Call Assign If Compare Assign Call Assign Call Assign Call Return return:yes Call Call If Compare Call Raise Assign Call Assign Assign Assign Call For If Compare Assign Assign Assign Call Call Call Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "log", + "source_code": "def log(X, /):\n if np.any(X.support()[0] < 0):\n message = 'The logarithm of a random variable is only implemented when the support is non-negative.'\n raise NotImplementedError(message)\n return MonotonicTransformedDistribution(X, g=np.log, h=np.exp, dh=np.exp, logdh=lambda u: u)", + "docstring": "Natural logarithm of a non-negative random variable Parameters ---------- X : The random variable :math: with positive support. Returns ------- Y : A random variable :math:. 
Examples -------- Suppose we have a gamma distributed random variable :math:: >>> import numpy as np >>> from scipy import stats >>> Gamma = stats.make_distribution(stats.gamma) >>> X = Gamma(a=1.0) We wish to have a exp-gamma distributed random variable :math:, a random variable whose natural exponential is :math:. If :math: is to be the natural exponential of :math:, then we must take :math: to be the natural logarithm of :math:. >>> Y = stats.log(X) To demonstrate that `Xexp(y)`')) >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py", + "ast_data": "FunctionDef name:log arguments arg If Call Compare Call Assign Raise Call Return return:yes Call arguments arg" + }, + { + "library": "tensorflow", + "name": "_assert_non_singular", + "source_code": "def _assert_non_singular(self):\n logging.warn('Using (possibly slow) default implementation of assert_non_singular. Requires conversion to a dense matrix and O(N^3) operations.')\n if self._can_use_cholesky():\n return self.assert_positive_definite()\n else:\n singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)\n cond = math_ops.reduce_max(singular_values, axis=-1) / math_ops.reduce_min(singular_values, axis=-1)\n return check_ops.assert_less(cond, self._max_condition_number_to_be_non_singular(), message='Singular matrix up to precision epsilon.')", + "docstring": "Private default implementation of _assert_non_singular.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:_assert_non_singular arg:self arguments arg Call If Call Return return:yes Call Assign Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "Deserializer", + "source_code": "class Deserializer:\n\n def __init__(self, stream_or_string, **options):\n self.options = options\n if isinstance(stream_or_string, str):\n self.stream = StringIO(stream_or_string)\n else:\n self.stream = stream_or_string\n\n def __iter__(self):\n return self\n\n def __next__(self):\n raise NotImplementedError('subclasses of Deserializer must provide a __next__() method')", + "docstring": "Abstract base deserializer class.", + "type": "class", + "file_path": "django\\django\\core\\serializers\\base.py", + "ast_data": "ClassDef name:Deserializer FunctionDef name:__init__ arg:self arg:stream_or_string arguments arg arg arg Assign If Call Assign Call Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg Raise Call" + }, + { + "library": "scipy", + "name": "_moment_raw", + "source_code": "def _moment_raw(self, order=1, *, method=None):\n methods = self._moment_methods if method is None else {method}\n return self._moment_raw_dispatch(order, methods=methods, **self._parameters)", + "docstring": "Raw distribution moment about the origin.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py", + "ast_data": "FunctionDef name:_moment_raw arg:self arg:order arguments arg arg arg Assign Compare Return return:yes Call" + }, + { + "library": "scipy", + "name": "insert_knot", + "source_code": "def insert_knot(self, x, m=1):\n if x < self.t[self.k] or x > self.t[-self.k - 1]:\n raise ValueError(f'Cannot insert a knot at {x}.')\n if m <= 0:\n raise ValueError(f'`m` must be positive, got m = {m!r}.')\n tt = self.t.copy()\n cc = self.c.copy()\n for _ in range(m):\n tt, cc = _insert(x, tt, cc, self.k, self.extrapolate == 
'periodic')\n return self.construct_fast(tt, cc, self.k, self.extrapolate, self.axis)", + "docstring": "Insert a new knot at of multiplicity . Given the knots and coefficients of a B-spline representation, create a new B-spline with a knot inserted times at point . Parameters ---------- x : float The position of the new knot m : int, optional The number of times to insert the given knot (its multiplicity). Default is 1. Returns ------- spl : object A new object with the new knot inserted. Notes ----- Based on algorithms from [1]_ and [2]_. In case of a periodic spline (``t(k+1)>> import numpy as np >>> from scipy.interpolate import BSpline, make_interp_spline >>> x = np.linspace(0, 10, 5) >>> y = np.sin(x) >>> spl = make_interp_spline(x, y, k=3) >>> spl.t array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.]) Insert a single knot >>> spl_1 = spl.insert_knot(3) >>> spl_1.t array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.]) Insert a multiple knot >>> spl_2 = spl.insert_knot(8, m=3) >>> spl_2.t array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_bsplines.py", + "ast_data": "FunctionDef name:insert_knot arg:self arg:x arg:m arguments arg arg arg If BoolOp Compare Compare Raise Call If Compare Raise Call Assign Call Assign Call For Call Assign Call Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_CoordinatedSessionCreator", + "source_code": "class _CoordinatedSessionCreator(SessionCreator):\n\n def __init__(self, session_creator, hooks, stop_grace_period_secs):\n self._session_creator = session_creator\n self._hooks = hooks\n self.coord = None\n self.tf_sess = None\n self._stop_grace_period_secs = stop_grace_period_secs\n\n def create_session(self):\n self.tf_sess = self._session_creator.create_session()\n self.coord = coordinator.Coordinator(clean_stop_exception_types=[])\n if ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS):\n queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)\n for hook in self._hooks:\n hook.after_create_session(self.tf_sess, self.coord)\n return _CoordinatedSession(_HookedSession(self.tf_sess, self._hooks), self.coord, self._stop_grace_period_secs)", + "docstring": "Factory for _CoordinatedSession.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "ClassDef name:_CoordinatedSessionCreator FunctionDef name:__init__ arg:self arg:session_creator arg:hooks arg:stop_grace_period_secs arguments arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:create_session arg:self arguments arg Assign Call Assign Call If Call Call For Call Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "standard_scale", + "source_code": "@staticmethod\ndef standard_scale(data2d, axis=1):\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n subtract = standardized.min()\n standardized = (standardized - subtract) / (standardized.max() - standardized.min())\n if axis == 1:\n return standardized\n else:\n return standardized.T", + "docstring": "Divide the data by the difference between the max and min Parameters ---------- data2d : pandas.DataFrame Data to normalize axis : int Which axis to normalize across. If 0, normalize across rows, if 1, normalize across columns. 
Returns ------- standardized : pandas.DataFrame Noramlized data with a mean of 0 and variance of 1 across the specified axis.", + "type": "method", + "file_path": "seaborn\\seaborn\\matrix.py", + "ast_data": "FunctionDef name:standard_scale arg:data2d arg:axis arguments arg arg If Compare Assign Assign Assign Call Assign Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "lecun_uniform", + "source_code": "def lecun_uniform(seed=None):\n return VarianceScaling(scale=1.0, mode='fan_in', distribution='uniform', seed=seed)", + "docstring": "LeCun uniform initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a uniform distribution within [-limit, limit] where is where is the number of input units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.lecun_uniform()) >>> v1 >> v2 >> make_variables(4, tf.initializers.RandomNormal()) ( Stream:\n _lazy_init()\n streamdata = torch._C._xpu_getStreamFromExternal(data_ptr, _get_device_index(device, optional=True))\n return Stream(stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2])", + "docstring": "Return a :class: from an external SYCL queue. This function is used to wrap SYCL queue created in other libraries in order to facilitate data exchange and multi-library interactions. .. note:: This function doesn't manage the queue life-cycle, it is the user responsibility to keep the referenced queue alive while this returned stream is being used. The different SYCL queue pointers will result in distinct :class: objects, even if the SYCL queues they dereference are equivalent. Args: data_ptr(int): Integer representation of the value passed externally. device(torch.device or int, optional): the device where the queue was originally created. It is the user responsibility to ensure the device is specified correctly.", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\__init__.py", + "ast_data": "FunctionDef name:get_stream_from_external arg:data_ptr arg:device arguments arg arg Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_init_values_from_proto", + "source_code": "def _init_values_from_proto(self, values_def, import_scope=None):\n assert isinstance(values_def, control_flow_pb2.ValuesDef)\n self._values = set((ops.prepend_name_scope(value, import_scope) for value in values_def.values))\n g = ops.get_default_graph()\n self._external_values = {}\n for k, v in values_def.external_values.items():\n k = ops.prepend_name_scope(k, import_scope)\n self._external_values[k] = g.as_graph_element(ops.prepend_name_scope(v, import_scope))\n op_names = set([op.split(':')[0] for op in self._values - set(self._external_values.keys())])\n for op in op_names:\n g.as_graph_element(op)._set_control_flow_context(self)", + "docstring": "Initializes values and external_values from protocol buffer. Args: values_def: protocol buffer. import_scope: Optional . 
Name scope to add.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:_init_values_from_proto arg:self arg:values_def arg:import_scope arguments arg arg arg Call Assign Call Call Assign Call Assign For Call Assign Call Assign Call Call Assign Call Call Call Call For Call Call" + }, + { + "library": "tensorflow", + "name": "dtype", + "source_code": "@property\ndef dtype(self):\n return self._row_splits.dtype", + "docstring": "The used to encode the row partition (either int32 or int64).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "_identify_group", + "source_code": "def _identify_group(self, key: str, append: bool) -> Node:\n group = self.get_node(key)\n assert self._handle is not None\n if group is not None and (not append):\n self._handle.remove_node(group, recursive=True)\n group = None\n if group is None:\n group = self._create_nodes_and_group(key)\n return group", + "docstring": "Identify HDF5 group based on key, delete/create group if needed.", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:_identify_group arg:self arg:key arg:append arguments arg arg arg Assign Call Compare If BoolOp Compare Call Assign If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "run_benchmark_with_only_cpp_iterations", + "source_code": "def run_benchmark_with_only_cpp_iterations(self, dataset):\n dataset = dataset.skip(self.iters - 1)\n iterator = dataset_ops.make_initializable_iterator(dataset)\n next_element = iterator.get_next()\n with session.Session() as sess:\n deltas = []\n for _ in range(self.num_reps):\n sess.run(iterator.initializer)\n deltas.append(timeit.timeit(lambda: sess.run(next_element.op), number=1))\n self.report(deltas)", + "docstring": "Benchmarks the dataset with the iterations performed in C++.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\meta_benchmark.py", + "ast_data": "FunctionDef name:run_benchmark_with_only_cpp_iterations arg:self arg:dataset arguments arg arg Assign Call Assign Call Assign Call With Call Assign For Call Call Call Call arguments Call Call" + }, + { + "library": "scipy", + "name": "support", + "source_code": "def support(self, *args, **kwargs):\n args, loc, scale = self._parse_args(*args, **kwargs)\n arrs = np.broadcast_arrays(*args, loc, scale)\n args, loc, scale = (arrs[:-2], arrs[-2], arrs[-1])\n cond = self._argcheck(*args) & (scale > 0)\n _a, _b = self._get_support(*args)\n if cond.all():\n return (_a * scale + loc, _b * scale + loc)\n elif cond.ndim == 0:\n return (self.badvalue, self.badvalue)\n _a, _b = (np.asarray(_a).astype('d'), np.asarray(_b).astype('d'))\n out_a, out_b = (_a * scale + loc, _b * scale + loc)\n place(out_a, 1 - cond, self.badvalue)\n place(out_b, 1 - cond, self.badvalue)\n return (out_a, out_b)", + "docstring": "Support of the distribution. Parameters ---------- arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional location parameter, Default is 0. scale : array_like, optional scale parameter, Default is 1. 
Returns ------- a, b : array_like end-points of the distribution's support.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:support arg:self arguments arg arg arg Assign Call Assign Call Assign Assign Call Compare Assign Call If Call Return return:yes If Compare Return return:yes Assign Call Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "view_limits", + "source_code": "def view_limits(self, vmin, vmax):\n if vmax < vmin:\n vmin, vmax = (vmax, vmin)\n if vmin == vmax:\n vmin -= 1\n vmax += 1\n if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n exponent, remainder = divmod(math.log10(vmax - vmin), math.log10(max(self.numticks - 1, 1)))\n exponent -= remainder < 0.5\n scale = max(self.numticks - 1, 1) ** (-exponent)\n vmin = math.floor(scale * vmin) / scale\n vmax = math.ceil(scale * vmax) / scale\n return mtransforms.nonsingular(vmin, vmax)", + "docstring": "Try to choose the view limits intelligently.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:view_limits arg:self arg:vmin arg:vmax arguments arg arg arg If Compare Assign If Compare If Compare Assign Call Call Call Call Compare Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "TraceType", + "source_code": "@tf_export('types.experimental.TraceType', v1=[])\nclass TraceType(metaclass=abc.ABCMeta):\n\n @abc.abstractmethod\n def is_subtype_of(self, other: 'TraceType') -> bool:\n pass\n\n @abc.abstractmethod\n def most_specific_common_supertype(self, others: Sequence['TraceType']) -> Optional['TraceType']:\n pass\n\n @abc.abstractmethod\n def placeholder_value(self, placeholder_context) -> Any:\n pass\n\n def to_tensors(self, value: Any) -> List[core.Tensor]:\n del value\n return []\n\n def from_tensors(self, tensors: Iterator[core.Tensor]) -> Any:\n del tensors\n return self.placeholder_value(PlaceholderContext())\n\n def flatten(self) -> List['TraceType']:\n return []\n\n def cast(self, value, cast_context) -> Any:\n del cast_context\n assert value == self.placeholder_value(PlaceholderContext()), f'Can not cast {value!r} to type {self!r}'\n return value\n\n @abc.abstractmethod\n def __hash__(self) -> int:\n pass\n\n @abc.abstractmethod\n def __eq__(self, other) -> bool:\n pass", + "docstring": "Represents the type of object(s) for tf.function tracing purposes. is an abstract class that other classes might inherit from to provide information regarding associated class(es) for the purposes of tf.function tracing. The typing logic provided through this mechanism will be used to make decisions regarding usage of cached concrete functions and retracing. For example, if we have the following tf.function and classes: tf.function does not know when to re-use an existing concrete function in regards to the class so naively it retraces for every new instance. However, we, as the designers of the class, know that each subclass has a fixed flavor and we can reuse an existing traced concrete function if it was the same subclass. Avoiding such unnecessary tracing of concrete functions can have significant performance benefits. 
Now if we try calling it again:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py", + "ast_data": "ClassDef name:TraceType FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg FunctionDef name:to_tensors arg:self arg:value arguments arg arg Return return:no FunctionDef name:from_tensors arg:self arg:tensors arguments arg arg Return return:yes Call Call FunctionDef name:flatten arg:self arguments arg Return return:no FunctionDef name:cast arg:self arg:value arg:cast_context arguments arg arg arg Compare Call Call Return return:yes FunctionDef name:__hash__ arg:self arguments arg FunctionDef name:__eq__ arg:self arg:other arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "max_memory_allocated", + "source_code": "def max_memory_allocated(device: 'Device'=None) -> int:\n return memory_stats(device=device).get('allocated_bytes.all.peak', 0)", + "docstring": "Return the maximum GPU memory occupied by tensors in bytes for a given device. By default, this returns the peak allocated memory since the beginning of this program. :func: can be used to reset the starting point in tracking this metric. For example, these two functions can measure the peak allocated memory usage of each iteration in a training loop. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `cuda-memory-management` for more details about GPU memory management.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\memory.py", + "ast_data": "FunctionDef name:max_memory_allocated arg:device arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "errors", + "source_code": "@property\ndef errors(self):\n if self._errors is None:\n self.full_clean()\n return self._errors", + "docstring": "Return an ErrorDict for the data provided for the form.", + "type": "method", + "file_path": "django\\django\\forms\\forms.py", + "ast_data": "FunctionDef name:errors arg:self arguments arg If Compare Call Return return:yes" + }, + { + "library": "pytorch", + "name": "finalize_prefix", + "source_code": "def finalize_prefix(self):\n old_prefix = self.prefix\n self.prefix = IndentedBuffer()\n super().finalize_prefix()\n for kernel in self._triton_call_wrappers.values():\n self.prefix.writeline('\\n')\n kernel.generate(self)\n self.prefix.writeline('\\n')\n self.prefix.splice(old_prefix)", + "docstring": "Define the triton kernels now that autotuning is finished", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_gpu.py", + "ast_data": "FunctionDef name:finalize_prefix arg:self arguments arg Assign Assign Call Call Call For Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "Mirrored", + "source_code": "@tf_export('types.experimental.distributed.Mirrored', v1=[])\nclass Mirrored(DistributedValues):\n pass", + "docstring": "Holds a distributed value: a map from replica id to synchronized values. values are for which we know that the value on all replicas is the same. values are kept synchronized by the distribution strategy in use, while values are left unsynchronized. values typically represent model weights. 
We can safely read a value in a cross-replica context by using the value on any replica, while values should not be read or manipulated directly by the user in a cross-replica context.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py", + "ast_data": "ClassDef name:Mirrored Call" + }, + { + "library": "numpy", + "name": "sort", + "source_code": "def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, stable=None):\n a = np.array(a, copy=True, subok=True)\n if axis is None:\n a = a.flatten()\n axis = 0\n if isinstance(a, MaskedArray):\n a.sort(axis=axis, kind=kind, order=order, endwith=endwith, fill_value=fill_value, stable=stable)\n else:\n a.sort(axis=axis, kind=kind, order=order, stable=stable)\n return a", + "docstring": "Return a sorted copy of the masked array. Equivalent to creating a copy of the array and applying the MaskedArray `` for the full documentation See Also -------- MaskedArray.sort : equivalent method Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] >>> masked_x = ma.masked_array(x, mask) >>> masked_x masked_array(data=[11.2, -3.973, 0.801, --], mask=[False, False, False, True], fill_value=1e+20) >>> ma.sort(masked_x) masked_array(data=[-3.973, 0.801, 11.2, --], mask=[False, False, False, True], fill_value=1e+20)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:sort arg:a arg:axis arg:kind arg:order arg:endwith arg:fill_value arguments arg arg arg arg arg arg arg Assign Call If Compare Assign Call Assign If Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_count_fx_targets", + "source_code": "def _count_fx_targets(exported_program: torch.export.ExportedProgram) -> defaultdict[str, int]:\n fx_node_target_count: defaultdict[str, int] = defaultdict(int)\n for node in exported_program.graph.nodes:\n if node.op == 'call_function':\n fx_node_target_count[str(node.target)] += 1\n return fx_node_target_count", + "docstring": "Count the number of targets for each node in the exported program.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_analysis.py", + "ast_data": "FunctionDef name:_count_fx_targets arg:exported_program arguments arg Call For If Compare Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_rasterized", + "source_code": "def set_rasterized(self, rasterized):\n supports_rasterization = getattr(self.draw, '_supports_rasterization', False)\n if rasterized and (not supports_rasterization):\n _api.warn_external(f\"Rasterization of '{self}' will be ignored\")\n self._rasterized = rasterized", + "docstring": "Force rasterized (bitmap) drawing for vector graphics output. Rasterized drawing is not supported by all artists. If you try to enable this on an artist that does not support it, the command has no effect and a warning will be issued. This setting is ignored for pixel-based output. See also :doc:. Parameters ---------- rasterized : bool", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:set_rasterized arg:self arg:rasterized arguments arg arg Assign Call If BoolOp Call Assign" + }, + { + "library": "tensorflow", + "name": "replicate", + "source_code": "@classmethod\ndef replicate(cls):\n return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED))", + "docstring": "Returns a replicated sharding attribute. 
This causes an op to be computed in its entirety independently on all cores in the XLA device.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py", + "ast_data": "FunctionDef name:replicate arg:cls arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "slugify", + "source_code": "@keep_lazy_text\ndef slugify(value, allow_unicode=False):\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub('[^\\\\w\\\\s-]', '', value.lower())\n return re.sub('[-\\\\s]+', '-', value).strip('-_')", + "docstring": "Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated dashes to single dashes. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace, dashes, and underscores.", + "type": "function", + "file_path": "django\\django\\utils\\text.py", + "ast_data": "FunctionDef name:slugify arg:value arg:allow_unicode arguments arg arg Assign Call If Assign Call Assign Call Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "register_proto_function", + "source_code": "def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None) -> None:\n if to_proto and (not callable(to_proto)):\n raise TypeError('to_proto must be callable.')\n if from_proto and (not callable(from_proto)):\n raise TypeError('from_proto must be callable.')\n _proto_function_registry.register((proto_type, to_proto, from_proto), collection_name)", + "docstring": "Registers and functions for collection_name. function converts a Python object to the corresponding protocol buffer, and returns the protocol buffer. function converts protocol buffer into a Python object, and returns the object.. Args: collection_name: Name of the collection. proto_type: Protobuf type, such as , , .. to_proto: Function that implements Python object to protobuf conversion. 
from_proto: Function that implements protobuf to Python object conversion.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:register_proto_function arg:collection_name arg:proto_type arg:to_proto arg:from_proto arguments arg arg arg arg If BoolOp Call Raise Call If BoolOp Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "NormalWithSoftplusScale", + "source_code": "class NormalWithSoftplusScale(Normal):\n\n @deprecation.deprecated('2019-01-01', 'Use `tfd.Normal(loc, tf.nn.softplus(scale)) instead.', warn_once=True)\n def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='NormalWithSoftplusScale'):\n parameters = dict(locals())\n with ops.name_scope(name, values=[scale]) as name:\n super(NormalWithSoftplusScale, self).__init__(loc=loc, scale=nn.softplus(scale, name='softplus_scale'), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name)\n self._parameters = parameters", + "docstring": "Normal with softplus applied to .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\normal.py", + "ast_data": "ClassDef name:NormalWithSoftplusScale FunctionDef name:__init__ arg:self arg:loc arg:scale arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call Call Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "reset_max_memory_allocated", + "source_code": "def reset_max_memory_allocated(device: 'Device'=None) -> None:\n warnings.warn('torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, which resets /all/ peak memory stats.', FutureWarning)\n return reset_peak_memory_stats(device=device)", + "docstring": "Reset the starting point in tracking maximum GPU memory occupied by tensors for a given device. See :func: for details. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `~torch.cuda.reset_peak_memory_statscuda-memory-management` for more details about GPU memory management.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\memory.py", + "ast_data": "FunctionDef name:reset_max_memory_allocated arg:device arguments arg Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "rand", + "source_code": "def rand(*args):\n if isinstance(args[0], tuple):\n args = args[0]\n return asmatrix(np.random.rand(*args))", + "docstring": "Return a matrix of random values with given shape. Create a matrix of the given shape and propagate it with random samples from a uniform distribution over `\\*args`. 
See Also -------- randn, numpy.random.RandomState.rand Examples -------- >>> np.random.seed(123) >>> import numpy.matlib >>> np.matlib.rand(2, 3) matrix([[0.69646919, 0.28613933, 0.22685145], [0.55131477, 0.71946897, 0.42310646]]) >>> np.matlib.rand((2, 3)) matrix([[0.9807642 , 0.68482974, 0.4809319 ], [0.39211752, 0.34317802, 0.72904971]]) If the first argument is a tuple, other arguments are ignored: >>> np.matlib.rand((2, 3), 4) matrix([[0.43857224, 0.0596779 , 0.39804426], [0.73799541, 0.18249173, 0.17545176]])", + "type": "function", + "file_path": "numpy\\numpy\\matlib.py", + "ast_data": "FunctionDef name:rand arguments arg If Call Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "initialize_or_restore", + "source_code": "def initialize_or_restore(self, session=None):\n if context.executing_eagerly():\n return\n if session is None:\n session = get_session()\n all_objects = util.list_objects(self._object_graph_view)\n already_initialized_objects = object_identity.ObjectIdentitySet(self._checkpoint.object_by_proto_id.values())\n initializers_for_non_restored_variables = [c.initializer for c in all_objects if hasattr(c, 'initializer') and c not in already_initialized_objects and (getattr(c, '_update_uid', self._checkpoint.restore_uid - 1) < self._checkpoint.restore_uid)]\n self.run_restore_ops(session=session)\n session.run(initializers_for_non_restored_variables)", + "docstring": "Run operations to initialize or restore objects in the dependency graph. Any objects in the dependency graph which have initializers but are not in the checkpoint will have those initializers run, unless those variables are being restored by a later call to . This method has a sibling in which instead initializes variables. That type is returned if no checkpoint is specified in . Args: session: The session to run init/restore ops in. If , uses the default session.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:initialize_or_restore arg:self arg:session arguments arg arg If Call Return return:no If Compare Assign Call Assign Call Assign Call Call Assign BoolOp Call Compare Compare Call Call Call" + }, + { + "library": "django", + "name": "close", + "source_code": "def close(self):\n self._producer = []", + "docstring": "Used to invalidate/disable this lazy stream. Replace the producer with an empty list. 
Any leftover bytes that have already been read will still be reported upon read() and/or next().", + "type": "method", + "file_path": "django\\django\\http\\multipartparser.py", + "ast_data": "FunctionDef name:close arg:self arguments arg Assign" + }, + { + "library": "tensorflow", + "name": "GetLoopConstantEnter", + "source_code": "def GetLoopConstantEnter(value):\n id_ops = {'Switch', 'RefSwitch', 'Identity', 'RefIdentity'}\n op = value.op\n while op.type in id_ops:\n op = op.inputs[0].op\n return op if IsLoopConstantEnter(op) else None", + "docstring": "Return the enter op if we can infer to be a loop invariant.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py", + "ast_data": "FunctionDef name:GetLoopConstantEnter arg:value arguments arg Assign Assign While Compare Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_calculate_range_stats", + "source_code": "def _calculate_range_stats(self, x_copy):\n min_val_cur, max_val_cur = torch.aminmax(x_copy)\n epoch_min_val = torch.min(self.epoch_activation_min, min_val_cur)\n epoch_max_val = torch.max(self.epoch_activation_max, max_val_cur)\n self.epoch_activation_min.copy_(epoch_min_val)\n self.epoch_activation_max.copy_(epoch_max_val)\n current_batch_range = max_val_cur - min_val_cur\n new_range = (self.average_batch_activation_range * self.num_batches_tracked + current_batch_range) / (self.num_batches_tracked + 1)\n self.average_batch_activation_range = new_range\n self.num_batches_tracked += 1\n return x_copy", + "docstring": "Calculates and stores range stats with forward values. Args x_copy: A copy of the forward data Returns the passed in x_copy", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_observer.py", + "ast_data": "FunctionDef name:_calculate_range_stats arg:self arg:x_copy arguments arg arg Assign Call Assign Call Assign Call Call Call Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "impl_backward", + "source_code": "def impl_backward(self, output_differentiability=None, _stacklevel=2):\n if output_differentiability is not None:\n\n def yell():\n raise RuntimeError(f'impl_backward(output_differentiability): expected output_differentiability to be a list of bools with length equal to the number of outputs of this CustomOp got: {output_differentiability}')\n if not isinstance(output_differentiability, list):\n yell()\n for diff in output_differentiability:\n if not isinstance(diff, bool):\n yell()\n if len(self._schema.returns) != len(output_differentiability):\n yell()\n\n def inner(f):\n self._check_can_register_backward()\n self._check_doesnt_have_library_autograd_impl()\n if not self._registered_autograd_kernel_indirection:\n self._register_autograd_kernel_indirection()\n self._register_impl('backward', f, stacklevel=_stacklevel)\n self._output_differentiability = output_differentiability\n if self._has_impl('save_for_backward'):\n self._register_autograd_kernel()\n return inner", + "docstring": "This API is deprecated, please use torch.library.custom_op instead", + "type": "method", + "file_path": "pytorch\\torch\\_custom_op\\impl.py", + "ast_data": "FunctionDef name:impl_backward arg:self arg:output_differentiability arg:_stacklevel arguments arg arg arg If Compare FunctionDef name:yell arguments Raise Call If Call Call For If Call Call If Compare Call Call Call FunctionDef name:inner arg:f arguments arg Call Call If Call Call Assign If Call Call Return return:yes" + 
}, + { + "library": "scipy", + "name": "_round_to_power_of_two", + "source_code": "def _round_to_power_of_two(x):\n return 2 ** np.around(np.log2(x))", + "docstring": "Round elements of the array to the nearest power of two.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_linprog_util.py", + "ast_data": "FunctionDef name:_round_to_power_of_two arg:x arguments arg Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "terminal_safe", + "source_code": "def terminal_safe(s: str) -> str:\n return s.encode('ascii', 'backslashreplace').decode('ascii')", + "docstring": "Safely encode a string for printing to the terminal.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\console.py", + "ast_data": "FunctionDef name:terminal_safe arg:s arguments arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_estimate_wishart_diag", + "source_code": "def _estimate_wishart_diag(self, nk, xk, sk):\n _, n_features = xk.shape\n self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk\n diff = xk - self.mean_prior_\n self.covariances_ = self.covariance_prior_ + nk[:, np.newaxis] * (sk + (self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis] * np.square(diff))\n self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis]", + "docstring": "Estimate the diag Wishart distribution parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like of shape (n_components, n_features)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py", + "ast_data": "FunctionDef name:_estimate_wishart_diag arg:self arg:nk arg:xk arg:sk arguments arg arg arg arg Assign Assign Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "smart_cond", + "source_code": "def smart_cond(pred, true_fn=None, false_fn=None, name=None):\n if isinstance(pred, variables.Variable):\n return cond.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)\n return smart_module.smart_cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)", + "docstring": "Return either if predicate is true else . If is a bool or has a constant value, we return either or , otherwise we use to dynamically route to both. Args: pred: A scalar determining whether to return the result of or . true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using . Returns: Tensors returned by the call to either or . 
Raises: TypeError: If or is not callable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\layers\\utils.py", + "ast_data": "FunctionDef name:smart_cond arg:pred arg:true_fn arg:false_fn arg:name arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "deprecate_nonkeyword_arguments", + "source_code": "def deprecate_nonkeyword_arguments(version: str | None, allowed_args: list[str] | None=None, name: str | None=None) -> Callable[[F], F]:\n\n def decorate(func):\n old_sig = inspect.signature(func)\n if allowed_args is not None:\n allow_args = allowed_args\n else:\n allow_args = [p.name for p in old_sig.parameters.values() if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) and p.default is p.empty]\n new_params = [p.replace(kind=p.KEYWORD_ONLY) if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) and p.name not in allow_args else p for p in old_sig.parameters.values()]\n new_params.sort(key=lambda p: p.kind)\n new_sig = old_sig.replace(parameters=new_params)\n num_allow_args = len(allow_args)\n msg = f'{future_version_msg(version)} all arguments of {name or func.__qualname__}{{arguments}} will be keyword-only.'\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > num_allow_args:\n warnings.warn(msg.format(arguments=_format_argument_list(allow_args)), FutureWarning, stacklevel=find_stack_level())\n return func(*args, **kwargs)\n wrapper.__signature__ = new_sig\n return wrapper\n return decorate", + "docstring": "Decorator to deprecate a use of non-keyword arguments of a function. Parameters ---------- version : str, optional The version in which positional arguments will become keyword-only. If None, then the warning message won't specify any particular version. allowed_args : list, optional In case of list, it must be the list of names of some first arguments of the decorated functions that are OK to be given as positional arguments. In case of None value, defaults to list of all arguments not having the default value. name : str, optional The specific name of the function to show in the warning message. If None, then the Qualified name of the function is used.", + "type": "function", + "file_path": "pandas\\pandas\\util\\_decorators.py", + "ast_data": "FunctionDef name:deprecate_nonkeyword_arguments arg:version arg:allowed_args arg:name arguments arg arg arg FunctionDef name:decorate arg:func arguments arg Assign Call If Compare Assign Assign Call BoolOp Compare Compare Assign BoolOp Compare Compare Call Call Call arguments arg Assign Call Assign Call Assign Call BoolOp FunctionDef name:wrapper arguments arg arg If Compare Call Call Call Call Call Return return:yes Call Call Assign Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "poisson_loss", + "source_code": "def poisson_loss(y_true, y_pred, sample_weight=None):\n return np.average(xlogy(y_true, y_true / y_pred) - y_true + y_pred, weights=sample_weight, axis=0).sum()", + "docstring": "Compute (half of the) Poisson deviance loss for regression. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_pred : array-like or label indicator matrix Predicted values, as returned by a regression estimator. sample_weight : array-like of shape (n_samples,), default=None Sample weights. 
Returns ------- loss : float The degree to which the samples are correctly predicted.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py", + "ast_data": "FunctionDef name:poisson_loss arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "image_summary", + "source_code": "@deprecated('2016-11-30', 'Please switch to tf.summary.image. Note that tf.summary.image uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, the max_images argument was renamed to max_outputs.')\ndef image_summary(tag, tensor, max_images=3, collections=None, name=None):\n with ops.name_scope(name, 'ImageSummary', [tag, tensor]) as scope:\n val = gen_logging_ops.image_summary(tag=tag, tensor=tensor, max_images=max_images, name=scope)\n _Collect(val, collections, [ops.GraphKeys.SUMMARIES])\n return val", + "docstring": "Outputs a protocol buffer with images. For an explanation of why this op was deprecated, and information on how to migrate, look ['here']( The summary has up to summary values containing images. The images are built from which must be 4-D with shape and where can be: * 1: is interpreted as Grayscale. * 3: is interpreted as RGB. * 4: is interpreted as RGBA. The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range . values are unchanged. The op uses two different normalization algorithms: * If the input values are all positive, they are rescaled so the largest one is 255. * If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255. The argument is a scalar of type . It is used to build the of the summary values: * If is 1, the summary value tag is '*tag*/image'. * If is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. Args: tag: A scalar of type . Used to build the of the summary values. tensor: A 4-D or of shape where is 1, 3, or 4. max_images: Max number of batch elements to generate images for. collections: Optional list of ops.GraphKeys. The collections to add the summary to. Defaults to [ops.GraphKeys.SUMMARIES] name: A name for the operation (optional). Returns: A scalar of type . 
The serialized protocol buffer.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py", + "ast_data": "FunctionDef name:image_summary arg:tag arg:tensor arg:max_images arg:collections arg:name arguments arg arg arg arg arg With Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "horizontal_partition", + "source_code": "@staticmethod\ndef horizontal_partition(nodes: list[BaseSchedulerNode], triton_scheduling: SIMDScheduling, kernel_map: dict[BaseSchedulerNode, TritonKernel], node_info_map: dict[BaseSchedulerNode, tuple[Any, Any, Any, Any]], custom_algorithm: bool=False) -> list[list[BaseSchedulerNode]]:\n if custom_algorithm:\n raw_partitions = _custom_combo_kernel_horizontal_partition_algorithm(nodes, triton_scheduling, kernel_map, node_info_map)\n else:\n raw_partitions = [nodes]\n 'Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnumel)\\n for each subkernel node where each sublist is guaranteed to not exceed CUDA limits for number of args\\n (read/writes) and to have the same 2D or 1D blocking strategy.'\n all_partitions = []\n for raw_partition in raw_partitions:\n all_partitions.extend(ComboKernel._base_horizontal_partition(raw_partition, triton_scheduling, node_info_map, custom_algorithm))\n return all_partitions", + "docstring": "Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnum) for each subkernel node where each sublist forms a ComboKernel. It horizontally partitions nodes into sublists in the following way: 1) call _custom_combo_kernel_horizontal_partition_algorithm() if custom_algorithm is True 2) then, call _base_horizontal_partition() to partition nodes into sublists, each sublist is guaranteed to not exceed CUDA limits for number of args (read/writes) and to have the same 2D or 1D blocking strategy.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\triton_combo_kernel.py", + "ast_data": "FunctionDef name:horizontal_partition arg:nodes arg:triton_scheduling arg:kernel_map arg:node_info_map arg:custom_algorithm arguments arg arg arg arg arg If Assign Call Assign Assign For Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "release_lock", + "source_code": "def release_lock(self, path=None):\n self.lock.release()\n self.locked = False", + "docstring": "Release the lock on the currently-loaded session data.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:release_lock arg:self arg:path arguments arg arg Call Assign" + }, + { + "library": "tensorflow", + "name": "_TPUPollingThread", + "source_code": "class _TPUPollingThread(threading.Thread):\n\n def __init__(self, cluster, session):\n super(_TPUPollingThread, self).__init__()\n self.daemon = True\n self._running = True\n self._session_closed = False\n self._cluster = cluster\n self._session = session\n self._interval = 30\n for name in ['googleapiclient.discovery', 'oauth2client.client']:\n _logging.getLogger(name).setLevel(_logging.WARNING)\n\n def stop(self):\n self._running = False\n self._session_closed = True\n self.join()\n\n def run(self):\n if not tpu_cluster_resolver.is_running_in_gce():\n logging.warning('TPUPollingThread is running in a non-GCE environment, exiting...')\n self._running = False\n return\n while self._running:\n recoverable = self._cluster._cloud_tpu_client.recoverable()\n if not recoverable:\n logging.warning('TPUPollingThread found 
TPU %s in state %s', self._cluster._tpu, self._cluster._cloud_tpu_client.state())\n os._exit(1)\n time.sleep(self._interval)", + "docstring": "A thread that polls the state of a TPU node. When the node transitions into a TERMINAL state (PREEMPTED, TERMINATED) that's considered as not recoverable by the underlying infrastructure, it attempts to close the session, and exits the entire process if the session.close() stucks.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\preempted_hook.py", + "ast_data": "ClassDef name:_TPUPollingThread FunctionDef name:__init__ arg:self arg:cluster arg:session arguments arg arg arg Call Call Assign Assign Assign Assign Assign Assign For Call Call FunctionDef name:stop arg:self arguments arg Assign Assign Call FunctionDef name:run arg:self arguments arg If Call Call Assign Return return:no While Assign Call If Call Call Call Call" + }, + { + "library": "pytorch", + "name": "alive", + "source_code": "def alive(self) -> bool:\n return self.running and self.process.poll() is None", + "docstring": "True if the subprocess is still running.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\autotune_process.py", + "ast_data": "FunctionDef name:alive arg:self arguments arg Return return:yes BoolOp Compare Call" + }, + { + "library": "pytorch", + "name": "filter_op", + "source_code": "def filter_op(self, op: 'CKTileGemmOperation'):\n if not self.check_dtypes(op):\n return None\n if not self.check_layouts(op):\n return None\n if not self.check_block_tiles(op):\n return None\n if not self.check_alignments(op):\n return None\n return op", + "docstring": "Determines whether a given op definition is suitable for the current input / output of the operation that this template implements. Filter is based on inputs' dtype, layout and statically inferred size. Returns None if the op is not suitable, otherwise returns the op to be used.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\ck_tile_universal_gemm_template.py", + "ast_data": "FunctionDef name:filter_op arg:self arg:op arguments arg arg If Call Return return:no If Call Return return:no If Call Return return:no If Call Return return:no Return return:yes" + }, + { + "library": "matplotlib", + "name": "setup", + "source_code": "def setup(ax, title):\n ax.yaxis.set_major_locator(ticker.NullLocator())\n ax.spines[['left', 'right', 'top']].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.tick_params(which='major', width=1.0, length=5)\n ax.tick_params(which='minor', width=0.75, length=2.5)\n ax.set_xlim(0, 5)\n ax.set_ylim(0, 1)\n ax.text(0.0, 0.2, title, transform=ax.transAxes, fontsize=14, fontname='Monospace', color='tab:blue')", + "docstring": "Set up common parameters for the Axes in the example.", + "type": "function", + "file_path": "matplotlib\\galleries\\users_explain\\axes\\axes_ticks.py", + "ast_data": "FunctionDef name:setup arg:ax arg:title arguments arg arg Call Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "output_shapes", + "source_code": "@property\ndef output_shapes(self):\n return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self._element_spec)", + "docstring": "Returns the shape of each component of an element of this iterator. 
Returns: A nested structure of objects corresponding to each component of an element of this dataset.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "FunctionDef name:output_shapes arg:self arguments arg Return return:yes Call arguments arg Call" + }, + { + "library": "tensorflow", + "name": "_TRTEngineResource", + "source_code": "class _TRTEngineResource(resource.TrackableResource):\n\n def __init__(self, resource_name, filename, maximum_cached_engines, device='GPU'):\n super(_TRTEngineResource, self).__init__(device=device)\n self._resource_name = resource_name\n self._filename = self._track_trackable(asset.Asset(filename), '_serialized_trt_resource_filename')\n self._maximum_cached_engines = maximum_cached_engines\n\n def _create_resource(self):\n return _get_resource_handle(self._resource_name, self._resource_device)\n\n def _initialize(self):\n gen_trt_ops.initialize_trt_resource(self.resource_handle, self._filename, max_cached_engines_count=self._maximum_cached_engines)\n\n def _destroy_resource(self):\n handle = _get_resource_handle(self._resource_name, self._resource_device)\n with ops.device(self._resource_device):\n gen_resource_variable_ops.destroy_resource_op(handle, ignore_lookup_error=True)", + "docstring": "Class to track the serialized engines resource.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py", + "ast_data": "ClassDef name:_TRTEngineResource FunctionDef name:__init__ arg:self arg:resource_name arg:filename arg:maximum_cached_engines arg:device arguments arg arg arg arg arg Call Call Assign Assign Call Call Assign FunctionDef name:_create_resource arg:self arguments arg Return return:yes Call FunctionDef name:_initialize arg:self arguments arg Call FunctionDef name:_destroy_resource arg:self arguments arg Assign Call With Call Call" + }, + { + "library": "scipy", + "name": "_mwu_input_validation", + "source_code": "def _mwu_input_validation(x, y, use_continuity, alternative, axis, method):\n x, y = (np.atleast_1d(x), np.atleast_1d(y))\n if np.isnan(x).any() or np.isnan(y).any():\n raise ValueError('`x` and `y` must not contain NaNs.')\n if np.size(x) == 0 or np.size(y) == 0:\n raise ValueError('`x` and `y` must be of nonzero size.')\n bools = {True, False}\n if use_continuity not in bools:\n raise ValueError(f'`use_continuity` must be one of {bools}.')\n alternatives = {'two-sided', 'less', 'greater'}\n alternative = alternative.lower()\n if alternative not in alternatives:\n raise ValueError(f'`alternative` must be one of {alternatives}.')\n axis_int = int(axis)\n if axis != axis_int:\n raise ValueError('`axis` must be an integer.')\n if not isinstance(method, stats.PermutationMethod):\n methods = {'asymptotic', 'exact', 'auto'}\n method = method.lower()\n if method not in methods:\n raise ValueError(f'`method` must be one of {methods}.')\n return (x, y, use_continuity, alternative, axis_int, method)", + "docstring": "Input validation and standardization for mannwhitneyu", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mannwhitneyu.py", + "ast_data": "FunctionDef name:_mwu_input_validation arg:x arg:y arg:use_continuity arg:alternative arg:axis arg:method arguments arg arg arg arg arg arg Assign Call Call If BoolOp Call Call Call Call Raise Call If BoolOp Compare Call Compare Call Raise Call Assign If Compare Raise Call Assign Assign Call If Compare Raise Call Assign Call If Compare Raise Call If Call Assign Assign Call If Compare 
Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "whosmat", + "source_code": "@docfiller\ndef whosmat(file_name, appendmat=True, **kwargs):\n with _open_file_context(file_name, appendmat) as f:\n ML, file_opened = mat_reader_factory(f, **kwargs)\n variables = ML.list_variables()\n return variables", + "docstring": "List variables inside a MATLAB file. Parameters ---------- %(file_arg)s %(append_arg)s %(load_args)s %(struct_arg)s Returns ------- variables : list of tuples A list of tuples, where each tuple holds the matrix name (a string), its shape (tuple of ints), and its data class (a string). Possible data classes are: int8, uint8, int16, uint16, int32, uint32, int64, uint64, single, double, cell, struct, object, char, sparse, function, opaque, logical, unknown. Notes ----- v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported. You will need an HDF5 python library to read matlab 7.3 format mat files (e.g. h5py). Because SciPy does not supply one, we do not implement the HDF5 / 7.3 interface here. .. versionadded:: 0.12.0 Examples -------- >>> from io import BytesIO >>> import numpy as np >>> from scipy.io import savemat, whosmat Create some arrays, and use to write them to a `whosmat`. >>> whosmat(f) [('a', (2, 3), 'int32'), ('b', (1, 5), 'double')]", + "type": "function", + "file_path": "scipy\\scipy\\io\\matlab\\_mio.py", + "ast_data": "FunctionDef name:whosmat arg:file_name arg:appendmat arguments arg arg arg With Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "PaddingFIFOQueue", + "source_code": "@tf_export('queue.PaddingFIFOQueue', v1=['queue.PaddingFIFOQueue', 'io.PaddingFIFOQueue', 'PaddingFIFOQueue'])\n@deprecation.deprecated_endpoints(['io.PaddingFIFOQueue', 'PaddingFIFOQueue'])\nclass PaddingFIFOQueue(QueueBase):\n\n def __init__(self, capacity, dtypes, shapes, names=None, shared_name=None, name='padding_fifo_queue'):\n dtypes = _as_type_list(dtypes)\n shapes = _as_shape_list(shapes, dtypes, unknown_dim_allowed=True)\n names = _as_name_list(names, dtypes)\n if len(dtypes) != len(shapes):\n raise ValueError(f'Shapes must be provided for all components, but received {len(dtypes)} dtypes and {len(shapes)} shapes.')\n queue_ref = gen_data_flow_ops.padding_fifo_queue_v2(component_types=dtypes, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name)\n super(PaddingFIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)", + "docstring": "A FIFOQueue that supports batching variable-sized tensors by padding. A may contain components with dynamic shape, while also supporting . See the constructor for more details. 
See for a description of the methods on this class.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "ClassDef name:PaddingFIFOQueue FunctionDef name:__init__ arg:self arg:capacity arg:dtypes arg:shapes arg:names arg:shared_name arg:name arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call If Compare Call Call Raise Call Call Call Assign Call Call Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "_LazyTickList", + "source_code": "class _LazyTickList:\n\n def __init__(self, major):\n self._major = major\n\n def __get__(self, instance, owner):\n if instance is None:\n return self\n elif self._major:\n instance.majorTicks = []\n tick = instance._get_tick(major=True)\n instance.majorTicks = [tick]\n return instance.majorTicks\n else:\n instance.minorTicks = []\n tick = instance._get_tick(major=False)\n instance.minorTicks = [tick]\n return instance.minorTicks", + "docstring": "A descriptor for lazy instantiation of tick lists. See comment above definition of the `` attributes.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "ClassDef name:_LazyTickList FunctionDef name:__init__ arg:self arg:major arguments arg arg Assign FunctionDef name:__get__ arg:self arg:instance arg:owner arguments arg arg arg If Compare Return return:yes If Assign Assign Call Assign Return return:yes Assign Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "make_list_of_t", + "source_code": "def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False):\n if isinstance(ts, ops.Graph):\n if allow_graph:\n return get_tensors(ts)\n else:\n raise TypeError('allow_graph is False: cannot convert a tf.Graph.')\n else:\n if not is_iterable(ts):\n ts = [ts]\n if not ts:\n return []\n if check_graph:\n check_types = None if ignore_ops else tensor_lib.Tensor\n get_unique_graph(ts, check_types=check_types)\n return [t for t in ts if isinstance(t, tensor_lib.Tensor)]", + "docstring": "Convert ts to a list of . Args: ts: can be an iterable of , a or a single tensor. check_graph: if check if all the tensors belong to the same graph. allow_graph: if a cannot be converted. ignore_ops: if , silently ignore . Returns: A newly created list of . Raises: TypeError: if cannot be converted to a list of or, if is , if all the ops do not belong to the same graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py", + "ast_data": "FunctionDef name:make_list_of_t arg:ts arg:check_graph arg:allow_graph arg:ignore_ops arguments arg arg arg arg If Call If Return return:yes Call Raise Call If Call Assign If Return return:no If Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "Event", + "source_code": "class Event:\n\n def __init__(self, name, canvas, guiEvent=None):\n self.name = name\n self.canvas = canvas\n self.guiEvent = guiEvent\n\n def _process(self):\n self.canvas.callbacks.process(self.name, self)\n self.guiEvent = None", + "docstring": "A Matplotlib event. The following attributes are defined and shown with their default values. Subclasses may define additional attributes. Attributes ---------- name : str The event name. canvas : The backend-specific canvas instance generating the event. 
guiEvent The GUI event that triggered the Matplotlib event.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "ClassDef name:Event FunctionDef name:__init__ arg:self arg:name arg:canvas arg:guiEvent arguments arg arg arg arg Assign Assign Assign FunctionDef name:_process arg:self arguments arg Call Assign" + }, + { + "library": "tensorflow", + "name": "from_config", + "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n _check_config_keys(config, cls._fields)\n kwargs = _standardize_and_copy_config(config)\n kwargs['dtype'] = dtypes.as_dtype(config['dtype'])\n return cls(**kwargs)", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "return_outputs_and_add_losses", + "source_code": "def return_outputs_and_add_losses(*args, **kwargs):\n if return_method:\n args = args[1:]\n outputs, losses = fn(*args, **kwargs)\n layer.add_loss(losses, inputs=True)\n if context.executing_eagerly():\n for i in layer._flatten_layers():\n if i is not layer:\n i._eager_losses = [base_layer_utils.REVIVED_LOSS_PLACEHOLDER]\n return outputs", + "docstring": "Returns the outputs from the layer call function, and adds the losses.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\utils.py", + "ast_data": "FunctionDef name:return_outputs_and_add_losses arguments arg arg If Assign Assign Call Call If Call For Call If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "ctc_greedy_decoder", + "source_code": "@tf_export('nn.ctc_greedy_decoder')\n@dispatch.add_dispatch_support\ndef ctc_greedy_decoder(inputs, sequence_length, merge_repeated=True, blank_index=None):\n outputs = gen_ctc_ops.ctc_greedy_decoder(inputs, sequence_length, merge_repeated=merge_repeated, blank_index=blank_index)\n decoded_ix, decoded_val, decoded_shape, log_probabilities = outputs\n return ([sparse_tensor.SparseTensor(decoded_ix, decoded_val, decoded_shape)], log_probabilities)", + "docstring": "Performs greedy decoding on the logits given in input (best path). Given a tensor as , the parameter defines the class index of the blank symbol. For example: If is equal to 1: >>> inf = float(\"inf\") >>> logits = tf.constant([[[ 0., -inf, -inf], ... [ -2.3, -inf, -0.1]], ... [[ -inf, -0.5, -inf], ... [ -inf, -inf, -0.1]], ... [[ -inf, -inf, -inf], ... [ -0.1, -inf, -2.3]]]) >>> seq_lens = tf.constant([2, 3]) >>> outputs = tf.nn.ctc_greedy_decoder( ... logits, ... seq_lens, ... blank_index=1) Notes: - Unlike , considers blanks as regular elements when computing the probability of a sequence. - Default is , unless overriden. If is , merge repeated classes in output. This means that if consecutive logits' maximum indices are the same, only the first of these is emitted. The sequence (where '*' is the blank label) becomes * if . * if . Args: inputs: 3-D sized . The logits. sequence_length: 1-D vector containing sequence lengths, having size . merge_repeated: Boolean. Default: True. blank_index: (Optional). Default: . Define the class index to use for the blank label. 
Negative values will start from num_classes, ie, -1 will reproduce the ctc_greedy_decoder behavior of using num_classes - 1 for the blank symbol, which corresponds to the default. Returns: A tuple where decoded: A single-element list. is an containing the decoded outputs s.t.: : Indices matrix . The rows store: . : Values vector, size . The vector stores the decoded classes. : Shape vector, size . The shape values are: neg_sum_logits: A matrix containing, for the sequence found, the negative of the sum of the greatest logit at each timeframe.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py", + "ast_data": "FunctionDef name:ctc_greedy_decoder arg:inputs arg:sequence_length arg:merge_repeated arg:blank_index arguments arg arg arg arg Assign Call Assign Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, sk, yk):\n if sk.shape != yk.shape or sk.ndim != 2:\n raise ValueError('sk and yk must have matching shape, (n_corrs, n)')\n n_corrs, n = sk.shape\n super().__init__(dtype=np.float64, shape=(n, n))\n self.sk = sk\n self.yk = yk\n self.n_corrs = n_corrs\n self.rho = 1 / np.einsum('ij,ij->i', sk, yk)", + "docstring": "Construct the operator.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_lbfgsb_py.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:sk arg:yk arguments arg arg arg If BoolOp Compare Compare Raise Call Assign Call Call Assign Assign Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "_neg", + "source_code": "@deprecation.deprecated('2016-12-30', '`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`')\ndef _neg(x, name=None):\n return negative(x, name)", + "docstring": "Computes numerical negative value element-wise. I.e., \\(y = -x\\). Args: x: A or . Must be one of the following types: , , , , , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:_neg arg:x arg:name arguments arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "DblFromGeom", + "source_code": "class DblFromGeom(GEOSFuncFactory):\n restype = c_int\n errcheck = staticmethod(check_dbl)", + "docstring": "Argument is a Geometry, return type is double that is passed in by reference as the last argument.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\misc.py", + "ast_data": "ClassDef name:DblFromGeom Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "convert_per_replica_to_dtensor", + "source_code": "def convert_per_replica_to_dtensor(per_replica_value, mesh):\n values = per_replica_value.values\n if isinstance(values[0], (float, int)):\n rank = 0\n else:\n rank = len(values[0].shape)\n if rank == 0:\n result = []\n for v in values:\n result.append(array_ops.expand_dims_v2(v, axis=0))\n rank += 1\n else:\n result = list(values)\n batch_layout = layout.Layout.batch_sharded(mesh, batch_dim=DEFAULT_BATCH_MESH_DIM_NAME, rank=rank)\n return d_api.pack(result, batch_layout)", + "docstring": "Convert a PerReplica result to a DTensor instance. Args: per_replica_value: A PerReplica instance whose value will be converted to DTensor. mesh: The mesh used for layout creation. 
Returns: A DTensor instance that packed from per_replica_value with batch sharded layout.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\dtensor_util.py", + "ast_data": "FunctionDef name:convert_per_replica_to_dtensor arg:per_replica_value arg:mesh arguments arg arg Assign If Call Assign Assign Call If Compare Assign For Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_indicator_column", + "source_code": "def _indicator_column(categorical_column):\n return _IndicatorColumn(categorical_column)", + "docstring": "Represents multi-hot representation of given categorical column. - For DNN model, can be used to wrap any (e.g., to feed to DNN). Consider to Use if the number of buckets/unique(values) are large. - For Wide (aka linear) model, is the internal representation for categorical column when passing categorical column directly (as any element in feature_columns) to . See for details. Args: categorical_column: A which is created by or functions. Returns: An .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_indicator_column arg:categorical_column arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "camera_matrix", + "source_code": "@property\ndef camera_matrix(self) -> Tensor:\n return self.intrinsics[..., :3, :3]", + "docstring": "Return the 3x3 camera matrix containing the intrinsics. Returns: tensor of shape :math:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:camera_matrix arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "apply_mask", + "source_code": "def apply_mask(self, module):\n assert self._tensor_name is not None, f'Module {module} has to be pruned'\n mask = getattr(module, self._tensor_name + '_mask')\n orig = getattr(module, self._tensor_name + '_orig')\n pruned_tensor = mask.to(dtype=orig.dtype) * orig\n return pruned_tensor", + "docstring": "Simply handles the multiplication between the parameter being pruned and the generated mask. Fetches the mask and the original tensor from the module and returns the pruned version of the tensor. Args: module (nn.Module): module containing the tensor to prune Returns: pruned_tensor (torch.Tensor): pruned version of the input tensor", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:apply_mask arg:self arg:module arguments arg arg Compare Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "_is_valid_arg", + "source_code": "def _is_valid_arg(self, arg: Any) -> bool:\n if isinstance(arg, (str,)) and os.path.exists(arg):\n return True\n if isinstance(arg, (Tensor,)):\n return True\n if isinstance(arg, (np.ndarray,)):\n return True\n if isinstance(arg, Image.Image):\n return True\n return False", + "docstring": "Check if the argument is a valid type for conversion. Args: arg: The argument to check. 
Returns: bool: True if valid, False otherwise.", + "type": "method", + "file_path": "kornia\\kornia\\core\\module.py", + "ast_data": "FunctionDef name:_is_valid_arg arg:self arg:arg arguments arg arg If BoolOp Call Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "run_forward", + "source_code": "def run_forward(self, num_runs, print_per_iter, cuda_sync):\n if print_per_iter:\n for _ in range(num_runs):\n start_time = time.time()\n self.output = self.op_bench.forward_impl()\n if cuda_sync:\n torch.cuda.synchronize(torch.cuda.current_device())\n end_time = time.time()\n self.time_series.append((end_time - start_time) * 1000.0)\n else:\n for _ in range(num_runs):\n self.output = self.op_bench.forward_impl()\n if cuda_sync:\n torch.cuda.synchronize(torch.cuda.current_device())", + "docstring": "Run the forward path of an op with eager mode", + "type": "method", + "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py", + "ast_data": "FunctionDef name:run_forward arg:self arg:num_runs arg:print_per_iter arg:cuda_sync arguments arg arg arg arg If For Call Assign Call Assign Call If Call Call Assign Call Call For Call Assign Call If Call Call" + }, + { + "library": "tensorflow", + "name": "_IgnoreErrorsDataset", + "source_code": "class _IgnoreErrorsDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n def __init__(self, input_dataset, log_warning, name=None):\n self._input_dataset = input_dataset\n self._name = name\n variant_tensor = gen_experimental_dataset_ops.ignore_errors_dataset(self._input_dataset._variant_tensor, log_warning=log_warning, **self._flat_structure)\n super().__init__(input_dataset, variant_tensor)", + "docstring": "A that drops erroneous elements from its input.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\ignore_errors_op.py", + "ast_data": "ClassDef name:_IgnoreErrorsDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:log_warning arg:name arguments arg arg arg arg Assign Assign Assign Call Call Call" + }, + { + "library": "scipy", + "name": "StyblinskiTang", + "source_code": "class StyblinskiTang(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n self.global_optimum = [[-2.90353401818596 for _ in range(self.N)]]\n self.fglob = -39.16616570377142 * self.N\n\n def fun(self, x, *args):\n self.nfev += 1\n return sum(x ** 4 - 16 * x ** 2 + 5 * x) / 2", + "docstring": "StyblinskiTang objective function. This class defines the Styblinski-Tang [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{StyblinskiTang}}(x) = \\sum_{i=1}^{n} \\left(x_i^4 - 16x_i^2 + 5x_i \\right) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:StyblinskiTang Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "int_to_base36", + "source_code": "def int_to_base36(i):\n char_set = '0123456789abcdefghijklmnopqrstuvwxyz'\n if i < 0:\n raise ValueError('Negative base36 conversion input.')\n if i < 36:\n return char_set[i]\n b36 = ''\n while i != 0:\n i, n = divmod(i, 36)\n b36 = char_set[n] + b36\n return b36", + "docstring": "Convert an integer to a base36 string.", + "type": "function", + "file_path": "django\\django\\utils\\http.py", + "ast_data": "FunctionDef name:int_to_base36 arg:i arguments arg Assign If Compare Raise Call If Compare Return return:yes Assign While Compare Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "trace_model_call", + "source_code": "def trace_model_call(model, input_signature=None):\n if input_signature is None:\n if isinstance(model.call, def_function.Function):\n input_signature = model.call.input_signature\n if input_signature is None:\n input_signature = model_input_signature(model)\n if input_signature is None:\n raise_model_input_error(model)\n\n @def_function.function(input_signature=input_signature)\n def _wrapped_model(*args):\n inputs = args[0] if len(input_signature) == 1 else list(args)\n with base_layer_utils.call_context().enter(model, inputs=inputs, build_graph=False, training=False, saving=True):\n outputs = model(inputs, training=False)\n output_names = model.output_names\n if output_names is None:\n from tensorflow.python.keras.engine import compile_utils\n output_names = compile_utils.create_pseudo_output_names(outputs)\n outputs = nest.flatten(outputs)\n return {name: output for name, output in zip(output_names, outputs)}\n return _wrapped_model", + "docstring": "Trace the model call to create a tf.function for exporting a Keras model. Args: model: A Keras model. input_signature: optional, a list of tf.TensorSpec objects specifying the inputs to the model. Returns: A tf.function wrapping the model's call function with input signatures set. Raises: ValueError: if input signature cannot be inferred from the model.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py", + "ast_data": "FunctionDef name:trace_model_call arg:model arg:input_signature arguments arg arg If Compare If Call Assign If Compare Assign Call If Compare Call FunctionDef name:_wrapped_model arguments arg Assign Compare Call Call With Call Call Assign Call Assign If Compare Assign Call Assign Call Return return:yes Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "real", + "source_code": "@property\ndef real(self):\n result = self._data.real.view(type(self))\n result.__setmask__(self._mask)\n return result", + "docstring": "The real part of the masked array. This property is a view on the real part of this . 
See Also -------- imag Examples -------- >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.real masked_array(data=[1.0, --, 3.45], mask=[False, True, False], fill_value=1e+20)", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:real arg:self arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "mpl_connect", + "source_code": "def mpl_connect(self, s, func):\n return self.callbacks.connect(s, func)", + "docstring": "Bind function *func* to event *s*. Parameters ---------- s : str One of the following events ids: - 'button_press_event' - 'button_release_event' - 'draw_event' - 'key_press_event' - 'key_release_event' - 'motion_notify_event' - 'pick_event' - 'resize_event' - 'scroll_event' - 'figure_enter_event', - 'figure_leave_event', - 'axes_enter_event', - 'axes_leave_event' - 'close_event'. func : callable The callback function to be executed, which must have the signature:: def func(event: Event) -> Any For the location events (button and key press/release), if the mouse is over the Axes, the `~matplotlib.axes.Axes.KeyEvent.MouseEvent.FigureCanvasBase.mpl_disconnect`. Examples -------- :: def on_press(event): print('you pressed', event.button, event.xdata, event.ydata) cid = canvas.mpl_connect('button_press_event', on_press)", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:mpl_connect arg:self arg:s arg:func arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "check_function_argument_count", + "source_code": "def check_function_argument_count(func, input_arity, infeed_queue):\n\n def format_error(complaint, quantity):\n return '%s %d argument%s' % (complaint, quantity, '' if quantity == 1 else 's')\n num_args_supplied = input_arity\n if infeed_queue is not None:\n num_args_supplied += infeed_queue.number_of_tuple_elements\n arg_spec = tf_inspect.getargspec(func)\n num_func_args = len(arg_spec.args)\n if arg_spec.defaults is None:\n num_func_defaults = 0\n else:\n num_func_defaults = len(arg_spec.defaults)\n min_func_args = num_func_args - num_func_defaults\n if num_args_supplied < min_func_args:\n if num_func_defaults == 0 and arg_spec.varargs is None:\n return format_error('exactly', num_func_args)\n else:\n return format_error('at least', min_func_args)\n if arg_spec.varargs is None and num_args_supplied > num_func_args:\n if num_func_defaults == 0:\n return format_error('exactly', num_func_args)\n else:\n return format_error('at most', num_func_args)\n return None", + "docstring": "Validate the number of input arguments to an XLA function. Args: func: the Python function that will be called to generate the body of an XLA computation graph. input_arity: the number of explicit arguments supplied by the caller. infeed_queue: if not None, the infeed queue that will supply additional arguments to the function. 
Returns: None if function can be called with the supplied number of arguments, or an error string if it cannot.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py", + "ast_data": "FunctionDef name:check_function_argument_count arg:func arg:input_arity arg:infeed_queue arguments arg arg arg FunctionDef name:format_error arg:complaint arg:quantity arguments arg arg Return return:yes Compare Assign If Compare Assign Call Assign Call If Compare Assign Assign Call Assign If Compare If BoolOp Compare Compare Return return:yes Call Return return:yes Call If BoolOp Compare Compare If Compare Return return:yes Call Return return:yes Call Return return:no" + }, + { + "library": "django", + "name": "nodata_value", + "source_code": "@nodata_value.setter\ndef nodata_value(self, value):\n if value is None:\n capi.delete_band_nodata_value(self._ptr)\n elif not isinstance(value, (int, float)):\n raise ValueError('Nodata value must be numeric or None.')\n else:\n capi.set_band_nodata_value(self._ptr, value)\n self._flush()", + "docstring": "Set the nodata value for this band.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py", + "ast_data": "FunctionDef name:nodata_value arg:self arg:value arguments arg arg If Compare Call If Call Raise Call Call Call" + }, + { + "library": "pytorch", + "name": "Dropout", + "source_code": "class Dropout(_DropoutNd):\n\n def forward(self, input: Tensor) -> Tensor:\n return F.dropout(input, self.p, self.training, self.inplace)", + "docstring": "During training, randomly zeroes some of the elements of the input tensor with probability :attr:. The zeroed elements are chosen independently for each forward call and are sampled from a Bernoulli distribution. Each channel will be zeroed out independently on every forward call. This has proven to be an effective technique for regularization and preventing the co-adaptation of neurons as described in the paper _ . Furthermore, the outputs are scaled by a factor of :math: during training. This means that during evaluation the module simply computes an identity function. Args: p: probability of an element to be zeroed. Default: 0.5 inplace: If set to `(*)(*)`. Output is of the same shape as input Examples:: >>> m = nn.Dropout(p=0.2) >>> input = torch.randn(20, 16) >>> output = m(input) .. 
_Improving neural networks by preventing co-adaptation of feature detectors:", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\dropout.py", + "ast_data": "ClassDef name:Dropout FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "adamax", + "source_code": "@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamax)\ndef adamax(params: list[Tensor], grads: list[Tensor], exp_avgs: list[Tensor], exp_infs: list[Tensor], state_steps: list[Tensor], foreach: Optional[bool]=None, maximize: bool=False, differentiable: bool=False, capturable: bool=False, has_complex: bool=False, *, eps: float, beta1: float, beta2: float, lr: float, weight_decay: float):\n if not torch.compiler.is_compiling() and (not all((isinstance(t, torch.Tensor) for t in state_steps))):\n raise RuntimeError('API has changed, `state_steps` argument must contain a list of singleton tensors')\n if foreach is None:\n _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)\n if foreach and torch.jit.is_scripting():\n raise RuntimeError('torch.jit.script not supported with foreach optimizers')\n if foreach and (not torch.jit.is_scripting()):\n func = _multi_tensor_adamax\n else:\n func = _single_tensor_adamax\n func(params, grads, exp_avgs, exp_infs, state_steps, eps=eps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, maximize=maximize, differentiable=differentiable, has_complex=has_complex, capturable=capturable)", + "docstring": "Functional API that performs adamax algorithm computation. See :class: for details.", + "type": "function", + "file_path": "pytorch\\torch\\optim\\adamax.py", + "ast_data": "FunctionDef name:adamax arg:params arg:grads arg:exp_avgs arg:exp_infs arg:state_steps arg:foreach arg:maximize arg:differentiable arg:capturable arg:has_complex arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If BoolOp Call Call Call Raise Call If Compare Assign Call If BoolOp Call Raise Call If BoolOp Call Assign Assign Call Call" + }, + { + "library": "kornia", + "name": "init_ray_dataset", + "source_code": "def init_ray_dataset(self, num_img_rays: Optional[Tensor]=None) -> None:\n if num_img_rays is None:\n self._init_uniform_ray_dataset()\n else:\n self._init_random_ray_dataset(num_img_rays)", + "docstring": "Initialize a ray dataset. 
Args: num_img_rays: If not None, number of rays to randomly cast from each camera: math: .", + "type": "method", + "file_path": "kornia\\kornia\\nerf\\data_utils.py", + "ast_data": "FunctionDef name:init_ray_dataset arg:self arg:num_img_rays arguments arg arg If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "_CalibrationAlgorithmBase", + "source_code": "class _CalibrationAlgorithmBase(abc.ABC):\n\n def __init__(self, statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions):\n self._statistics = statistics\n self._calib_opts = calib_opts\n\n @abc.abstractmethod\n def get_min_max_value(self) -> tuple[float, float]:\n pass", + "docstring": "Abstract base class for calibration algorithm.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py", + "ast_data": "ClassDef name:_CalibrationAlgorithmBase FunctionDef name:__init__ arg:self arg:statistics arg:calib_opts arguments arg arg arg Assign Assign FunctionDef name:get_min_max_value arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "set_float_to_observed_mapping", + "source_code": "def set_float_to_observed_mapping(self, float_class: type, observed_class: type, quant_type: QuantType=QuantType.STATIC) -> PrepareCustomConfig:\n if quant_type != QuantType.STATIC:\n raise ValueError('set_float_to_observed_mapping is currently only supported for static quantization')\n if quant_type not in self.float_to_observed_mapping:\n self.float_to_observed_mapping[quant_type] = {}\n self.float_to_observed_mapping[quant_type][float_class] = observed_class\n return self", + "docstring": "Set the mapping from a custom float module class to a custom observed module class. The observed module class must have a `` class method that converts the float module class to the observed module class. 
This is currently only supported for static quantization.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py", + "ast_data": "FunctionDef name:set_float_to_observed_mapping arg:self arg:float_class arg:observed_class arg:quant_type arguments arg arg arg arg If Compare Raise Call If Compare Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_remove_unneccessary_copy_op_pass", + "source_code": "def _remove_unneccessary_copy_op_pass(gm: torch.fx.GraphModule, new_graph_signature: ExportGraphSignature) -> tuple[torch.fx.GraphModule, ExportGraphSignature]:\n with gm._set_replace_hook(new_graph_signature.get_replace_hook()):\n for node in gm.graph.nodes:\n if node.op == 'output':\n args, _ = pytree.tree_flatten(node.args)\n for out in args:\n if isinstance(out, torch.fx.Node) and out.name in new_graph_signature.buffers_to_mutate:\n if out.op == 'call_function' and out.target == torch.ops.aten.copy.default:\n out.replace_all_uses_with(out.args[1])\n gm.graph.erase_node(out)\n gm.recompile()\n return (gm, new_graph_signature)", + "docstring": "Removes redundant copy_ node that was introduced due to mutated buffer.", + "type": "function", + "file_path": "pytorch\\torch\\export\\exported_program.py", + "ast_data": "FunctionDef name:_remove_unneccessary_copy_op_pass arg:gm arg:new_graph_signature arguments arg arg With Call Call For If Compare Assign Call For If BoolOp Call Compare If BoolOp Compare Compare Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_MulNoNanGrad", + "source_code": "@ops.RegisterGradient('MulNoNan')\ndef _MulNoNanGrad(op: ops.Operation, grad):\n x = op.inputs[0]\n y = op.inputs[1]\n if isinstance(grad, tensor.Tensor) and _ShapesFullySpecifiedAndEqual(x, y, grad):\n return (gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad))\n assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, ' vs. ', y.dtype)\n gx = gen_math_ops.mul_no_nan(grad, y)\n gy = gen_math_ops.mul_no_nan(x, grad)\n return _ReduceGradientArgs(x, y, gx, gy)", + "docstring": "The gradient of scalar multiplication with NaN-suppression.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_MulNoNanGrad arg:op arg:grad arguments arg arg Assign Assign If BoolOp Call Call Return return:yes Call Call Compare Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "roots_chebyt", + "source_code": "def roots_chebyt(n, mu=False):\n m = int(n)\n if n < 1 or n != m:\n raise ValueError('n must be a positive integer.')\n x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2 * m))\n w = np.full_like(x, pi / m)\n if mu:\n return (x, w, pi)\n else:\n return (x, w)", + "docstring": "Gauss-Chebyshev (first kind) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree Chebyshev polynomial of the first kind, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.4 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad numpy.polynomial.chebyshev.chebgauss References ---------- .. [AS] Milton Abramowitz and Irene A. 
Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:roots_chebyt arg:n arg:mu arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Assign Call Call Assign Call If Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, xy, width, height, *, edgecolor='k', facecolor='w', fill=True, text='', loc='right', fontproperties=None, visible_edges='closed'):\n super().__init__(xy, width=width, height=height, fill=fill, edgecolor=edgecolor, facecolor=facecolor)\n self.set_clip_on(False)\n self.visible_edges = visible_edges\n self._loc = loc\n self._text = Text(x=xy[0], y=xy[1], clip_on=False, text=text, fontproperties=fontproperties, horizontalalignment=loc, verticalalignment='center')", + "docstring": "Parameters ---------- xy : 2-tuple The position of the bottom left corner of the cell. width : float The cell width. height : float The cell height. edgecolor : :mpltype:, default: 'k' The color of the cell border. facecolor : :mpltype:, default: 'w' The cell facecolor. fill : bool, default: True Whether the cell background is filled. text : str, optional The cell text. loc : {'right', 'center', 'left'} The alignment of the text within the cell. fontproperties : dict, optional A dict defining the font properties of the text. Supported keys and values are the keyword arguments accepted by . visible_edges : {'closed', 'open', 'horizontal', 'vertical'} or substring of 'BRTL' The cell edges to be drawn with a line: a substring of 'BRTL' (bottom, right, top, left), or one of 'open' (no edges drawn), 'closed' (all edges drawn), 'horizontal' (bottom and top), 'vertical' (right and left).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:width arg:height arguments arg arg arg arg arg arg arg arg arg arg arg Call Call Call Assign Assign Assign Call" + }, + { + "library": "scrapy", + "name": "format_part_strings", + "source_code": "def format_part_strings(self, part_strings: list[str]) -> list[str]:\n if part_strings and part_strings[0].startswith('usage: '):\n part_strings[0] = 'Usage\\n=====\\n ' + part_strings[0][len('usage: '):]\n headings = [i for i in range(len(part_strings)) if part_strings[i].endswith(':\\n')]\n for index in headings[::-1]:\n char = '-' if 'Global Options' in part_strings[index] else '='\n part_strings[index] = part_strings[index][:-2].title()\n underline = ''.join(['\\n', char * len(part_strings[index]), '\\n'])\n part_strings.insert(index + 1, underline)\n return part_strings", + "docstring": "Underline and title case command line help message headers.", + "type": "method", + "file_path": "scrapy\\scrapy\\commands\\__init__.py", + "ast_data": "FunctionDef name:format_part_strings arg:self arg:part_strings arguments arg arg If BoolOp Call Assign Call Assign Call Call Call For Assign Compare Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_count", + "source_code": "@abstractmethod\ndef _count(self, X, Y):\n pass", + "docstring": "Update counts that are used to calculate probabilities. The counts make up a sufficient statistic extracted from the data. Accordingly, this method is called each time or update the model. and must be updated here along with any model specific counts. 
Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input samples. Y : ndarray of shape (n_samples, n_classes) Binarized class labels.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\naive_bayes.py", + "ast_data": "FunctionDef name:_count arg:self arg:X arg:Y arguments arg arg arg" + }, + { + "library": "scrapy", + "name": "build_from_crawler", + "source_code": "def build_from_crawler(objcls: type[T], crawler: Crawler, /, *args: Any, **kwargs: Any) -> T:\n if hasattr(objcls, 'from_crawler'):\n instance = objcls.from_crawler(crawler, *args, **kwargs)\n method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n warnings.warn(f'{objcls.__qualname__} has from_settings() but not from_crawler(). This is deprecated and calling from_settings() will be removed in a future Scrapy version. You can implement a simple from_crawler() that calls from_settings() with crawler.settings.', category=ScrapyDeprecationWarning, stacklevel=2)\n instance = objcls.from_settings(crawler.settings, *args, **kwargs)\n method_name = 'from_settings'\n else:\n instance = objcls(*args, **kwargs)\n method_name = '__new__'\n if instance is None:\n raise TypeError(f'{objcls.__qualname__}.{method_name} returned None')\n return cast(T, instance)", + "docstring": "Construct a class instance using its ``.", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\misc.py", + "ast_data": "FunctionDef name:build_from_crawler arguments arg arg arg arg If Call Assign Call Assign If Call Call Assign Call Assign Assign Call Assign If Compare Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "parse_arguments", + "source_code": "def parse_arguments():\n from argparse import ArgumentParser\n parser = ArgumentParser('AARCH64 wheels python CD')\n parser.add_argument('--debug', action='store_true')\n parser.add_argument('--build-only', action='store_true')\n parser.add_argument('--test-only', type=str)\n parser.add_argument('--enable-mkldnn', action='store_true')\n parser.add_argument('--enable-cuda', action='store_true')\n return parser.parse_args()", + "docstring": "Parse inline arguments", + "type": "function", + "file_path": "pytorch\\.ci\\aarch64_linux\\aarch64_wheel_ci_build.py", + "ast_data": "FunctionDef name:parse_arguments arguments Assign Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_add_new_centers", + "source_code": "def _add_new_centers(self):\n new_centers = self._choose_initial_centers()\n if self._distance_metric == COSINE_DISTANCE:\n new_centers = nn_impl.l2_normalize(new_centers, dim=1)\n all_centers = cond.cond(math_ops.equal(self._num_selected, 0), lambda: new_centers, lambda: array_ops.concat([self._cluster_centers, new_centers], 0))\n a = state_ops.assign(self._cluster_centers, all_centers, validate_shape=False)\n if self._cluster_centers_updated is not self._cluster_centers:\n a = state_ops.assign(self._cluster_centers_updated, a, validate_shape=False)\n return self._num_clusters - array_ops.shape(a)[0]", + "docstring": "Adds some centers and returns the number of centers remaining.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py", + "ast_data": "FunctionDef name:_add_new_centers arg:self arguments arg Assign Call If Compare Assign Call Assign Call Call arguments arguments Call Assign Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "functions_run_eagerly", + "source_code": 
"@tf_export('config.functions_run_eagerly')\ndef functions_run_eagerly():\n return RUN_FUNCTIONS_EAGERLY", + "docstring": "Returns the value of the setting.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\eager_function_run.py", + "ast_data": "FunctionDef name:functions_run_eagerly arguments Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n return get_routing_for_object(self._metadata_request)", + "docstring": "Get requested data properties. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.3 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "assert_is_batch_matrix", + "source_code": "def assert_is_batch_matrix(tensor):\n sh = tensor.shape\n if sh.ndims is not None and sh.ndims < 2:\n raise ValueError(f'Expected [batch] matrix to have at least two dimensions. Found: {tensor}.')", + "docstring": "Static assert that has rank or higher.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py", + "ast_data": "FunctionDef name:assert_is_batch_matrix arg:tensor arguments arg Assign If BoolOp Compare Compare Raise Call" + } +] \ No newline at end of file