query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (list, lengths 19-20) | metadata (dict)
---|---|---|---
Marshal functions for a Python primitive. This is the base case for our recursive function.
|
def primitive_marshal_funs(type_ref):
    assert type_ref.type_ in python_primitives
    return ('identity', 'identity')
|
[
"def addPrimitive(primitive):",
"def native(self) -> Structure:",
"def RuntimeType(self) -> _n_0_t_0:",
"def encode_primitive(self, validator, value):\n # type: (bv.Primitive, typing.Any) -> typing.Any\n raise NotImplementedError",
"def get_pack_py_def(self, indent=4, level=0):\n self.code(\"def pack(self):\")\n self.indent()\n self.code(\"\\\"\\\"\\\"Pack each bit of bitfield and return packed integer.\\\"\\\"\\\"\")\n self.code(\"ret = 0\")\n bits = list(self.bits)\n bits.sort()\n bits.reverse()\n for b in bits:\n self.code(\"ret |= self.%s.pack()\" % (b.name))\n self.code(\"return ret\")\n return self.current_code",
"def encode(v: Any) -> Any:\n if v is None:\n return None\n\n if isinstance(v, (float, int, str)):\n return v\n\n # check for namedtuples first, to encode them not as plain tuples\n if isinstance(v, tuple) and hasattr(v, \"_asdict\"):\n v = cast(NamedTuple, v)\n return {\n \"__kind__\": Kind.Instance,\n \"class\": fqname_for(v.__class__),\n \"kwargs\": encode(v._asdict()),\n }\n\n if isinstance(v, (tuple, set)):\n return {\n \"__kind__\": Kind.Instance,\n \"class\": fqname_for(type(v)),\n \"args\": [list(map(encode, v))],\n }\n\n if isinstance(v, list):\n return list(map(encode, v))\n\n if isinstance(v, dict):\n return valmap(encode, v)\n\n if isinstance(v, type):\n return {\"__kind__\": Kind.Type, \"class\": fqname_for(v)}\n\n if hasattr(v, \"__getnewargs_ex__\"):\n args, kwargs = v.__getnewargs_ex__() # mypy: ignore\n\n return {\n \"__kind__\": Kind.Instance,\n \"class\": fqname_for(v.__class__),\n # args need to be a list, since we encode tuples explicitly\n \"args\": encode(list(args)),\n \"kwargs\": encode(kwargs),\n }\n\n try:\n # as fallback, we try to just take the path of the value\n fqname = fqname_for(v)\n assert (\n \"<lambda>\" not in fqname\n ), f\"Can't serialize lambda function {fqname}\"\n\n if hasattr(v, \"__self__\") and hasattr(v, \"__func__\"):\n # v is a method\n # to model`obj.method`, we encode `getattr(obj, \"method\")`\n return {\n \"__kind__\": Kind.Instance,\n \"class\": fqname_for(getattr),\n \"args\": encode((v.__self__, v.__func__.__name__)),\n }\n\n return {\"__kind__\": Kind.Type, \"class\": fqname_for(v)}\n except AttributeError:\n pass\n\n raise RuntimeError(bad_type_msg.format(fqname_for(v.__class__)))",
"def Value(self) -> UnmanagedType:",
"def map_marshal_funs(type_ref):\n assert type_ref.type_ == 'Map'\n\n type_params_dict = dict(type_ref.type_params) \n key_type_ref = type_params_dict['Key']\n #key_marshal, key_unmarshal = type_ref_marshal_funs(key_type_ref)\n # SPECIAL TREATMENTFOR KEYS\n assert key_type_ref.type_ == 'string'\n key_marshal = 'identity'\n key_unmarshal = 'identity'\n \n val_type_ref = type_params_dict['Value']\n val_marshal, val_unmarshal = type_ref_marshal_funs(val_type_ref)\n\n template = 'transform_map(%s, %s)'\n\n marshal_fun = template % (key_marshal, val_marshal)\n unmarshal_fun = template % (key_unmarshal, val_unmarshal)\n \n return marshal_fun, unmarshal_fun",
"def primitive(self, path, *types):\n raise NotImplementedError('{0} does not support primitives!'.format(type(self)))",
"def msg_pack(obj):\n return packb(obj, default=custom_encode, use_bin_type=True)",
"def test_custom_marshal_functions():\n\n def dump(data, instance, field):\n # Instead of {v: 1}, output {1: v}\n data[getattr(instance, field.name)] = field.name\n return data\n\n def load(data, field):\n # Consume all other keys, sum length of all\n sum = 0\n for k, v in data.items():\n sum += len(v)\n\n return sum, list(data.keys())\n\n @model\n class Foo:\n v: int = attrib(metadata={'marshal': custom_marshal(dump, load)})\n\n assert Foo.to_server(Foo(v=1)) == {1: 'v'}\n\n assert Foo.from_server({'x': 'red', 'y': 'blue'}) == Foo(v=7)",
"def _encode_proxy(self, data):\n return [\"__object__\", data.handle]",
"def _generate_pack_op(self):\n obj = self.original_fn.__self__ if self.is_method else None\n fn = self.original_fn.pack_fn\n key = f\"{id(obj)}_{id(fn)}\"\n if self.is_method:\n setattr(obj, self.pack_fn_name, PackFunc(fn, key, obj))\n else:\n fn.__globals__[self.pack_fn_name] = PackFunc(fn, key, obj)",
"def create_primitives(self):\n for tag, ooc_name in TYPE_MAP.iteritems():\n self.objects[tag] = {\n 'class': 'Primitive',\n 'tag': tag,\n 'name': ooc_name,\n 'ooc_name': ooc_name,\n 'c_name': tag,\n 'wrapped': True,\n }",
"def Value(self) -> TypeLibFuncFlags:",
"def test_derived_type(self, native_or_pretty, targets):\n serialized = native_or_pretty.serialize(\"unpickleable\", targets)\n assert serialized == \"UnPickleableInt[42]\"",
"def list_marshal_funs(type_ref):\n assert type_ref.type_ == 'List'\n \n item_type_ref = dict(type_ref.type_params)['Item']\n item_marshal, item_unmarshal = type_ref_marshal_funs(item_type_ref)\n\n template = 'transform_list(%s)'\n marshal_fun = template % item_marshal\n unmarshal_fun = template % item_unmarshal\n\n return marshal_fun, unmarshal_fun",
"def _nativeType(self):\n\t\treturn str",
"def wrap_primitive(val: Any) -> Value:\n # check if int value is 32 bit via fit within max 32 bit int value\n is_int32 = lambda x: -(2 ** 31) <= x < 2 ** 31\n if type(val) in [int, np.int32] and is_int32(val):\n return Value(\n data_type=types.int32,\n primitive=Value.Primitive(int_32=int(val)),\n )\n elif type(val) in [int, np.int64]:\n return Value(\n data_type=types.int64,\n primitive=Value.Primitive(int_64=int(val)),\n )\n # TODO(mrzzy): figure out how to check if value fits within 32 bits\n elif type(val) in [float, np.float64]:\n return Value(\n data_type=types.float64,\n primitive=Value.Primitive(float_64=float(val)),\n )\n elif type(val) in [str, np.str_]:\n return Value(\n data_type=types.string,\n primitive=Value.Primitive(str_val=str(val)),\n )\n elif type(val) in [bool, np.bool_]:\n return Value(\n data_type=types.boolean,\n primitive=Value.Primitive(boolean=bool(val)),\n )\n else:\n raise TypeError(\n f\"{type(val)} is not a supported native primitive type to wrap as Value proto.\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
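The marshal-function snippets in these rows all call a recursive dispatcher, `type_ref_marshal_funs`, which is not itself included in the dataset. The following is only a minimal sketch of how such a dispatcher might tie the primitive base case above to the List and Map cases shown in later rows; the `TypeRef` attributes and the `python_primitives` set are assumptions inferred from the snippets.

```python
# Illustrative sketch only; the actual dispatcher is not part of this dataset row.
# Assumes TypeRef objects expose .type_ and .type_params as the snippets do.
python_primitives = {'string', 'int', 'float', 'bool'}  # assumed primitive names


def type_ref_marshal_funs(type_ref):
    """Recursively build (marshal, unmarshal) expression strings for a type_ref."""
    if type_ref.type_ in python_primitives:
        return primitive_marshal_funs(type_ref)  # base case: ('identity', 'identity')
    if type_ref.type_ == 'List':
        return list_marshal_funs(type_ref)       # recurses on the 'Item' type parameter
    if type_ref.type_ == 'Map':
        return map_marshal_funs(type_ref)        # recurses on the 'Value' type parameter
    raise ValueError('unsupported type: %s' % type_ref.type_)
```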
Returns the marshal functions for a map type_ref. These may contain many layers of nested function calls.
|
def map_marshal_funs(type_ref):
    assert type_ref.type_ == 'Map'

    type_params_dict = dict(type_ref.type_params)
    key_type_ref = type_params_dict['Key']
    #key_marshal, key_unmarshal = type_ref_marshal_funs(key_type_ref)
    # SPECIAL TREATMENT FOR KEYS
    assert key_type_ref.type_ == 'string'
    key_marshal = 'identity'
    key_unmarshal = 'identity'

    val_type_ref = type_params_dict['Value']
    val_marshal, val_unmarshal = type_ref_marshal_funs(val_type_ref)

    template = 'transform_map(%s, %s)'

    marshal_fun = template % (key_marshal, val_marshal)
    unmarshal_fun = template % (key_unmarshal, val_unmarshal)

    return marshal_fun, unmarshal_fun
|
[
"def primitive_marshal_funs(type_ref):\n assert type_ref.type_ in python_primitives\n return ('identity', 'identity')",
"def list_marshal_funs(type_ref):\n assert type_ref.type_ == 'List'\n \n item_type_ref = dict(type_ref.type_params)['Item']\n item_marshal, item_unmarshal = type_ref_marshal_funs(item_type_ref)\n\n template = 'transform_list(%s)'\n marshal_fun = template % item_marshal\n unmarshal_fun = template % item_unmarshal\n\n return marshal_fun, unmarshal_fun",
"def _generate_php_function_maps(self):\n \n fs=['fn0','fn1','fn2','fn3','fn4','fn5','fn6','fn7','fn8','fn9']\n \n delta=0\n \n if len(self.current_functions)==0:\n \n return\n \n delta=len(fs)- len(self.current_functions)- len(PHPApplicationMinify.FUNCTION_LISTS)\n \n flists=[]\n \n if delta<0:\n \n expansion=range(abs(delta))\n \n for k in expansion:\n \n m='fn'+str(k)\n \n flists.append(m)\n \n \n else:\n \n flists=fs\n \n for fn in self.current_functions:\n \n if fn not in PHP_EXCLUDED_FUNCTIONS:\n \n for n in flists:\n \n if not PHPApplicationMinify.FUNCTION_MAPS.has_key(fn) and n not in PHPApplicationMinify.FUNCTION_MAPS.values():\n \n PHPApplicationMinify.FUNCTION_MAPS[fn]=n\n break\n \n continue",
"def conv():\n conv_map = {}\n for name, code in getmembers(converters):\n if isfunction(code):\n conv_map[name] = code\n return conv_map",
"def dump_functions(self):\n funcs = {}\n for i in xrange(16):\n funcs[i] = self.dump_function(i)\n return funcs",
"def _typecasters():\n\n typecasters = dict(_PULLCAST.items())\n typecasters.update({\n field.name: field.pullcast\n for field in _extensions(None, None)\n if field.getter is None\n })\n return defaultdict(lambda: str, typecasters)",
"def _map_write_functions(self, data: pd.DataFrame) -> accepted_methods:\n function_map = {\n \"parquet\": data.to_parquet,\n \"csv\": data.to_csv,\n \"xls\": data.to_excel,\n \"xlsx\": data.to_excel,\n \"dat\": data.to_csv,\n \"data\": data.to_csv\n }\n return function_map.get(self.path.file_type)",
"def _get_module_funcs(mod) -> Dict[str, Callable]:\n return dict(member for member in getmembers(mod, isfunction))",
"def _get_types(attr):\n res = {}\n for extension_module in get_extension_modules():\n contributed = getattr(extension_module, attr, None)\n if callable(contributed):\n res.update(contributed())\n elif isinstance(contributed, dict):\n res.update(contributed)\n return res",
"def _GetAllUsedTypemaps(self):\n used_typemaps = []\n seen_types = set()\n def AddKind(kind):\n if (mojom.IsIntegralKind(kind) or mojom.IsStringKind(kind) or\n mojom.IsDoubleKind(kind) or mojom.IsFloatKind(kind) or\n mojom.IsAnyHandleKind(kind) or\n mojom.IsInterfaceKind(kind) or\n mojom.IsInterfaceRequestKind(kind) or\n mojom.IsAssociatedKind(kind) or\n mojom.IsPendingRemoteKind(kind) or\n mojom.IsPendingReceiverKind(kind)):\n pass\n elif mojom.IsArrayKind(kind):\n AddKind(kind.kind)\n elif mojom.IsMapKind(kind):\n AddKind(kind.key_kind)\n AddKind(kind.value_kind)\n else:\n name = self._GetFullMojomNameForKind(kind)\n if name in seen_types:\n return\n seen_types.add(name)\n\n typemap = self.typemap.get(name, None)\n if typemap:\n used_typemaps.append(typemap)\n if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):\n for field in kind.fields:\n AddKind(field.kind)\n\n for kind in self.module.structs + self.module.unions:\n for field in kind.fields:\n AddKind(field.kind)\n\n for interface in self.module.interfaces:\n for method in interface.methods:\n for parameter in method.parameters + (method.response_parameters or []):\n AddKind(parameter.kind)\n\n return used_typemaps",
"def CodeTypeForMapOf(self, type_name):\n return 'java.util.Map<String, %s>' % type_name",
"def generate_type_mapping(json_schema, base_uri, context_schemas, config):\n resolver = jsonschema.RefResolver(referrer=json_schema,\n store=context_schemas,\n base_uri=base_uri)\n return __gen_type_properties(json_schema, base_uri, resolver, config, {\n '_all': {'enable': config.all_field},\n 'numeric_detection': config.numeric_detection,\n 'date_detection': config.date_detection,\n # empty type mapping\n 'properties': {},\n })",
"def MakeFunctionMap():\r\n\treturn ExtendFunctionMap({})",
"def _get_functions_names(module):\n\n return [name for name in dir(module) if\n isinstance(getattr(module, name, None), types.FunctionType)]",
"def dumps_functions(serializers):\n if serializers is None:\n key_s = None\n value_s = None\n else:\n key_s = serializers.key_s\n value_s = serializers.value_s\n\n if key_s is None:\n dumps_key = functools.partial(pickle.dumps, protocol=-1)\n else:\n dumps_key = key_s.dumps\n\n if value_s is None:\n dumps_value = functools.partial(pickle.dumps, protocol=-1)\n else:\n dumps_value = value_s.dumps\n\n return dumps_key, dumps_value",
"def retFC():\n return funClasses",
"def get_type_shapes(self):\n type_shapes = self.cpp_force.getTypeShapesPy()\n ret = [json.loads(json_string) for json_string in type_shapes]\n return ret",
"def get_symbol_map():\n functions = {}\n for ea in Segments():\n for funcea in Functions(SegStart(ea), SegEnd(ea)):\n size = FindFuncEnd(funcea) - funcea\n functions[funcea] = (GetFunctionName(funcea), size)\n # It may not be necessary to sort by ea, but be safe...\n output_lines = []\n for i, (ea, (name, size)) in enumerate(sorted(functions.items())):\n if len(name) > 255:\n print \"ClemSym: truncating name\", name\n name = name[:255]\n line = \"%d: %s @ %07x %d\" % (i, name, ea, size)\n output_lines.append(line)\n return '\\n'.join(output_lines)",
"def create_function_dict(self, conf):\n all_funcs = process_args(conf,\n factory=self,\n str_keys=['type', 'path'])\n\n funcs_dict = {}\n for k, v in all_funcs.items():\n if isinstance(v, dict):\n f_type = v.pop('type')\n funcs_dict[k.lower()] = self.create_function(f_type, **v)\n else:\n funcs_dict[k.lower()] = v\n\n return funcs_dict"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
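To make the nesting concrete, here is a small hypothetical example of the strings these functions produce for a `Map[string, List[int]]` value, assuming a dispatcher like the one sketched after the first row; the `TypeRef` namedtuple below is a stand-in used only for illustration.

```python
from collections import namedtuple

# Hypothetical stand-in matching the .type_ / .type_params access pattern above.
TypeRef = namedtuple('TypeRef', ['type_', 'type_params'])

int_ref = TypeRef('int', ())
list_ref = TypeRef('List', (('Item', int_ref),))
map_ref = TypeRef('Map', (('Key', TypeRef('string', ())), ('Value', list_ref)))

marshal_fun, unmarshal_fun = map_marshal_funs(map_ref)
print(marshal_fun)  # transform_map(identity, transform_list(identity))
```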
Returns the marshal functions for a list data type.
|
def list_marshal_funs(type_ref):
    assert type_ref.type_ == 'List'

    item_type_ref = dict(type_ref.type_params)['Item']
    item_marshal, item_unmarshal = type_ref_marshal_funs(item_type_ref)

    template = 'transform_list(%s)'
    marshal_fun = template % item_marshal
    unmarshal_fun = template % item_unmarshal

    return marshal_fun, unmarshal_fun
|
[
"def get_function_record_types(self):\n return # osid.type.TypeList",
"def get_function_search_record_types(self):\n return # osid.type.TypeList",
"def list_multiple_data_types():\n return [93, 77, 'fiftyfive', 54, 44, 31, 26, 20, 17, 3]",
"def getListItemTypes(self, *args) -> \"SoTypeList const &\":\n return _coin.SoNodekitCatalog_getListItemTypes(self, *args)",
"def marshal_list_with(self, model, enveloppe):\n def wrapper(fn, *args, **kwargs):\n import pdb; pdb.set_trace()\n fn(*args, **kwargs)\n\n return wrapper",
"def PackList(list_):\n packed = []\n for l in list_:\n if not \"_pack\" in l.__dict__:\n continue\n else:\n packed.append(l._pack())",
"def primitive_marshal_funs(type_ref):\n assert type_ref.type_ in python_primitives\n return ('identity', 'identity')",
"def get_proxy_record_types(self):\n return # osid.type.TypeList",
"def GetDescriptorFuncs(self):\n res = []\n for nm in self.simpleList:\n fn = getattr(DescriptorsMod, nm, lambda x: 777)\n res.append(fn)\n return tuple(res)",
"def CodeTypeForArrayOf(self, type_name):\n return 'java.util.List<%s>' % type_name",
"def get_parameter_record_types(self):\n return # osid.type.TypeList",
"def get_map_record_types(self):\n return # osid.type.TypeList",
"async def infer_type_maplist(engine, f, xs):\n f_t = await f['type']\n xs_t = await xs['type']\n if not isinstance(xs_t, List):\n raise MyiaTypeError('Expect list for maplist')\n xref = engine.vref(dict(type=xs_t.element_type))\n ret_t = await f_t(xref)\n return List(ret_t)",
"def _getDataTypes(self):\r\n \r\n result = list()\r\n for dataType in self._dataTypes.values():\r\n result.append(deepcopy(dataType))\r\n return result",
"def getSortedDataTypeList(self) -> List[ghidra.program.model.data.DataType]:\n ...",
"def get_value_record_types(self):\n return # osid.type.TypeList",
"def map_marshal_funs(type_ref):\n assert type_ref.type_ == 'Map'\n\n type_params_dict = dict(type_ref.type_params) \n key_type_ref = type_params_dict['Key']\n #key_marshal, key_unmarshal = type_ref_marshal_funs(key_type_ref)\n # SPECIAL TREATMENTFOR KEYS\n assert key_type_ref.type_ == 'string'\n key_marshal = 'identity'\n key_unmarshal = 'identity'\n \n val_type_ref = type_params_dict['Value']\n val_marshal, val_unmarshal = type_ref_marshal_funs(val_type_ref)\n\n template = 'transform_map(%s, %s)'\n\n marshal_fun = template % (key_marshal, val_marshal)\n unmarshal_fun = template % (key_unmarshal, val_unmarshal)\n \n return marshal_fun, unmarshal_fun",
"def listify(gen):\n\n def patched(*args, **kwargs):\n \"\"\"Wrapper function\"\"\"\n return list(gen(*args, **kwargs))\n\n return patched",
"def list_packet_types():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
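The generated strings name runtime helpers (`identity`, `transform_list`, `transform_map`) that do not appear in this dataset; the definitions below are only a plausible guess at their behavior, included to show why the nested strings evaluate to working converters.

```python
def identity(value):
    """Pass a primitive value through unchanged."""
    return value


def transform_list(item_fun):
    """Return a converter that applies item_fun to every element of a list."""
    return lambda items: [item_fun(item) for item in items]


def transform_map(key_fun, val_fun):
    """Return a converter that applies key_fun and val_fun to every dict entry."""
    return lambda mapping: {key_fun(k): val_fun(v) for k, v in mapping.items()}


# With these guesses, the nested string from the Map example behaves as expected:
# transform_map(identity, transform_list(identity))({'a': [1, 2]}) == {'a': [1, 2]}
```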
Inserting numbers into the cells neighboring the insects in the raw_array, which initially contains only insect data.
|
def populateWithNumbers(self, raw_array, scaling):
    for y in range(self.height):
        for x in range(self.width):
            if raw_array[y][x] == -1:
                for i in range(-1, 2):
                    for j in range(-1, 2):
                        # checking bounds
                        if 0 <= y + i < self.height and 0 <= x + j < self.width:
                            # checking if not insect
                            if raw_array[y + i][x + j] != -1:
                                raw_array[y + i][x + j] += 1

    self.addCanvasToTiles(raw_array, scaling)
|
[
"def contiguous_pack2(arr, startAt=0):\n unqItms = _N.unique(arr) # 5 uniq items\n nUnqItms= unqItms.shape[0] # \n\n contg = _N.arange(0, len(unqItms)) + unqItms[0]\n nei = _N.where(unqItms > contg)[0]\n for i in xrange(len(nei)):\n arr[_N.where(arr == unqItms[nei[i]])[0]] = contg[nei[i]]\n arr += (startAt - unqItms[0])\n return nUnqItms",
"def map_array_sections(ext):\n xbin, ybin = ext.detector_x_bin(), ext.detector_y_bin()\n\n # These return lists, which is correct (it's ad.XXXX_section()\n # that's wrong; it should return a list containing this list)\n datasec = ext.data_section()\n arrsec = ext.array_section(pretty=False) # pretty required by code\n\n datsec, new_datsec = map_data_sections_to_trimmed_data(datasec)\n\n arrsec_is_list = isinstance(arrsec, list)\n sections = []\n xmin = min(asec.x1 for asec in arrsec) if arrsec_is_list else arrsec.x1\n ymin = min(asec.y1 for asec in arrsec) if arrsec_is_list else arrsec.y1\n for asec in (arrsec if arrsec_is_list else [arrsec]):\n sec = Section((asec.x1 - xmin) // xbin, (asec.x2 - xmin) // xbin,\n (asec.y1 - ymin) // ybin, (asec.y2 - ymin) // ybin)\n for dsec, new_dsec in zip(datsec, new_datsec):\n if new_dsec.contains(sec):\n sections.append(Section(*[a - b + c for a, b, c in\n zip(sec, new_dsec, dsec)]))\n break\n\n return sections if arrsec_is_list else sections[0]",
"def renumber_by_xcenter(seg):\n objects = [(slice(0,0),slice(0,0))]+find_objects(seg)\n def xc(o): \n # if some labels of the segmentation are missing, we\n # return a very large xcenter, which will move them all\n # the way to the right (they don't show up in the final\n # segmentation anyway)\n if o is None: return 999999\n return mean((o[1].start,o[1].stop))\n xs = array([xc(o) for o in objects])\n order = argsort(xs)\n segmap = zeros(amax(seg)+1,'i')\n for i,j in enumerate(order): segmap[j] = i\n return segmap[seg]",
"def extend_indeces(start, n, iInc, jInc):\n return [ (start[0]+k*iInc, start[1]+k*jInc) for k in xrange(0, n) ]",
"def compress(self):\n self.nodes = numpy.zeros([self.bounds[0] / 10 + 10, self.bounds[1] / 10 + 10], dtype='uint8')\n\n for row_index, row in enumerate(self.nodes):\n for node_index, node in enumerate(row):\n begin_x = node_index * 10\n begin_y = row_index * 10\n if numpy.count_nonzero(self.grid[begin_y:begin_y + 10, begin_x:begin_x + 10]): # temp fix by adding 10 nodes of wiggle room\n self.nodes[node_index][row_index] = 1",
"def contiguous_regions(binarr): \n sh = binarr.shape\n regions = [[]]\n visited = np.zeros(sh, bool)\n N = np.prod(sh)\n\n regions = []\n labels, nlab = ndimage.label(binarr)\n for j, o in enumerate(ndimage.find_objects(labels)):\n #sys.stderr.write('\\rlocation %06d out of %06d'%(j+1, nlab))\n origin = np.asarray([x.start for x in o])\n #x1 = np.asarray(np.where(labels[o] == j+1)).T\n x1 = np.argwhere(labels[o] == j+1)\n regions.append( map(tuple, (x1 + origin)))\n \n regions.sort(key = lambda x: len(x), reverse=True)\n return map(lambda x: RegionND(x, binarr.shape), regions)",
"def reindex(self):\n # keep anybody from using these - may have to \n # repopulate depending...\n self.agg_seg_hash=None\n self.agg_exch_hash=None\n\n # presumably agg_seg and agg_exch start as lists - normalize\n # to arrays for reindexing\n agg_seg=np.asarray(self.agg_seg,dtype=self.agg_seg_dtype)\n agg_exch=np.asarray(self.agg_exch,dtype=self.agg_exch_dtype)\n\n # seg_order[0] is the index of the original segments which\n # comes first in the new order.\n seg_order=np.lexsort( (agg_seg['elt'], agg_seg['k']) )\n agg_seg=agg_seg[seg_order]\n # seg_mapping[0] gives the new index of what used to be index 0\n seg_mapping=utils.invert_permutation(seg_order)\n self.agg_seg=agg_seg\n\n # update seg_local_to_agg\n sel=self.seg_local['agg']>=0\n self.seg_local['agg'][sel]=seg_mapping[ self.seg_local['agg'][sel] ]\n\n # update from/to indices in exchanges:\n sel=agg_exch['from']>=0\n agg_exch['from'][sel] = seg_mapping[ agg_exch['from'][sel] ]\n # this used to be >0. Is it possible that was the source of strife?\n sel=agg_exch['to']>=0\n agg_exch['to'][sel] = seg_mapping[ agg_exch['to'][sel] ]\n self.n_agg_segments=len(self.agg_seg)\n\n # lexsort handling of boundary segments: \n # should be okay - they will be sorted to the beginning of each layer\n\n exch_order=np.lexsort( (agg_exch['from'],agg_exch['to'],agg_exch['k'],agg_exch['direc']) )\n agg_exch=agg_exch[exch_order]\n exch_mapping=utils.invert_permutation(exch_order) \n\n sel=self.exch_local['agg']>=0\n self.exch_local['agg'][sel]=exch_mapping[self.exch_local['agg'][sel]]\n\n # bc_local_to_agg - excised\n\n self.n_exch_x=np.sum( agg_exch['direc']==b'x' )\n self.n_exch_y=np.sum( agg_exch['direc']==b'y' )\n self.n_exch_z=np.sum( agg_exch['direc']==b'z' )\n\n # used to populate agg_{x,y,z}_exch, too.\n self.agg_exch=agg_exch # used to be self.agg_exch\n\n # with all the exchanges in place and ordered correctly, assign boundary segment\n # indices to boundary exchanges\n n_bdry_exch=0\n for exch in self.agg_exch:\n if exch['from']>=0:\n continue # skip internal\n assert(exch['from']==REINDEX) # sanity, make sure nothing is getting mixed up.\n n_bdry_exch+=1\n # these have to start from -1, going negative\n exch['from']=-n_bdry_exch\n \n self.log.info(\"Aggregated output will have\" )\n self.log.info(\" %5d segments\"%(self.n_agg_segments))\n self.log.info(\" %5d exchanges (%d,%d,%d)\"%(len(self.agg_exch),\n self.n_exch_x,self.n_exch_y,self.n_exch_z) )\n \n self.log.info(\" %5d boundary exchanges\"%n_bdry_exch)",
"def local_IEN(self,patch_num,patch_mimic=False): \r\n \"\"\"\r\n 1D case\r\n \"\"\"\r\n \r\n if self.dimension==1:\r\n num_bases=self.num_bases\r\n num_element_bases=self.order+1\r\n num_local_bases=num_bases\r\n basis_array=np.linspace(1,num_local_bases,num_local_bases)\r\n\r\n if patch_mimic:\r\n return basis_array.astype(int)-1 #MAY NEED TO REVERSE\r\n else: \r\n \r\n\r\n \"\"\"\r\n Total number of elements in patch\r\n \"\"\"\r\n num_elements=self.number_elements\r\n \r\n \"\"\"\r\n Initializing IEN array\r\n \"\"\"\r\n if num_elements==1:\r\n patch_local_IEN=np.zeros(num_element_bases)\r\n \r\n else:\r\n patch_local_IEN=np.zeros((num_elements,num_element_bases))\r\n \r\n \"\"\"\r\n counter for IEN row\r\n \"\"\"\r\n IEN_row=0\r\n \"\"\"\r\n for loops for finding entries for each row of IEN\r\n \"\"\"\r\n for col in range(self.number_elements):\r\n \r\n \"\"\"\r\n Bounds for rows and columns in basis_array for current element\r\n \"\"\"\r\n lowest_col_in_ele=col*self.mp\r\n highest_col_in_ele=col*self.mp+self.order+1 #formatted to be used as index\r\n\r\n \r\n \"\"\"\r\n Gathers entries for current element in local IEN\r\n \"\"\"\r\n row_local_IEN=basis_array[lowest_col_in_ele:highest_col_in_ele]\r\n \r\n if num_elements==1:\r\n patch_local_IEN=row_local_IEN[::-1]\r\n else:\r\n patch_local_IEN[IEN_row,:]=row_local_IEN[::-1]\r\n \r\n \r\n \"\"\"\r\n Counter for going to next row in IEN\r\n \"\"\"\r\n IEN_row+=1\r\n\r\n \r\n \"\"\"\r\n Ensuring that entry is a 2D array by using a dummy row for consistency\r\n \"\"\"\r\n if len(patch_local_IEN.shape)!=2 :\r\n patch_local_IEN=np.vstack((patch_local_IEN,np.zeros(len(patch_local_IEN))))\r\n \r\n return patch_local_IEN.astype(int)-1\r\n \"\"\"\r\n 2D case\r\n \"\"\"\r\n \"\"\"\r\n Number of bases in principle directions along patch\r\n \"\"\"\r\n \r\n num_basis_xi=self.num_bases[patch_num,0]\r\n num_basis_eta=self.num_bases[patch_num,1]\r\n \r\n \"\"\"\r\n Total number of bases functions over patch\r\n \"\"\"\r\n num_local_bases=num_basis_xi*num_basis_eta\r\n \r\n \"\"\"\r\n Number of supporting bases over an element\r\n \"\"\"\r\n dimensions=self.order[patch_num,:]+1 #Number of bases in xi and eta direction with support on each element\r\n num_element_bases=dimensions.prod() \r\n \r\n \"\"\"\r\n Creating 2d array in \"shape\" of elements in patch that contains basis function numbers\r\n \"\"\"\r\n basis_array=np.linspace(1,num_local_bases,num_local_bases)\r\n basis_array=basis_array.reshape(num_basis_eta,num_basis_xi)\r\n \r\n if patch_mimic:\r\n \r\n return basis_array.astype(int)-1 #MAY NEED TO REVERSE\r\n \r\n else: \r\n \r\n \"\"\"\r\n Total number of elements in patch\r\n \"\"\"\r\n num_elements=self.number_elements[patch_num,:].prod()\r\n \r\n \"\"\"\r\n Initializing IEN array\r\n \"\"\"\r\n if num_elements==1:\r\n patch_local_IEN=np.zeros(num_element_bases)\r\n else:\r\n patch_local_IEN=np.zeros((num_elements,num_element_bases))\r\n \r\n \"\"\"\r\n counter for IEN row\r\n \"\"\"\r\n IEN_row=0\r\n \r\n \"\"\"\r\n for loops for finding entries for each row of IEN\r\n \"\"\"\r\n for row in range(self.number_elements[patch_num,1]):\r\n for col in range(self.number_elements[patch_num,0]):\r\n \r\n #ASK about line 294 in IGA file to shorten this\r\n \r\n \"\"\"\r\n Bounds for rows and columns in basis_array for current element\r\n \"\"\"\r\n lowest_row_in_ele=row*self.mp[patch_num,1]\r\n highest_row_in_ele=row*self.mp[patch_num,1]+self.order[patch_num,1]+1 #formatted to be used as index\r\n lowest_col_in_ele=col*self.mp[patch_num,0]\r\n 
highest_col_in_ele=col*self.mp[patch_num,0]+self.order[patch_num,0]+1 #formatted to be used as index\r\n \r\n \"\"\"\r\n Gathers entries for current element in local IEN\r\n \"\"\"\r\n row_local_IEN=basis_array[lowest_row_in_ele:highest_row_in_ele,lowest_col_in_ele:highest_col_in_ele]\r\n \r\n if num_elements==1:\r\n patch_local_IEN=row_local_IEN.flatten()[::-1]\r\n else:\r\n patch_local_IEN[IEN_row,:]=row_local_IEN.flatten()[::-1]\r\n \r\n \r\n \"\"\"\r\n Counter for going to next row in IEN\r\n \"\"\"\r\n IEN_row+=1\r\n \r\n \"\"\"\r\n Ensuring that entry is a 2D array by using a dummy row for consistency\r\n \"\"\"\r\n if len(patch_local_IEN.shape)!=2 :\r\n patch_local_IEN=np.vstack((patch_local_IEN,np.zeros(len(patch_local_IEN))))\r\n \r\n return patch_local_IEN.astype(int)-1",
"def simple_augment(multiple, ins, outs):\n print(type(ins))\n print(ins.shape)\n print(type(outs))\n print(outs.shape)\n\n new_ins = np.array(ins, copy=True)\n new_outs = np.array(outs, copy=True)\n for m in range(multiple):\n new_ins = np.concatenate((new_ins, ins))\n new_outs = np.concatenate((new_outs, outs))\n\n # certainly this can be done more efficiently\n for i in range(ins.shape[0]):\n if i % 1000 == 999:\n print('Augment {}'.format(i + 1))\n\n for p in range(ins.shape[1]):\n neighbors = []\n above = p - IMAGE_WIDTH\n if above >= 0:\n neighbors.append(ins[i, above])\n if (p % IMAGE_WIDTH) != 0:\n left = p - 1\n neighbors.append(ins[i, left])\n if (p % IMAGE_WIDTH) != (IMAGE_WIDTH - 1):\n right = p + 1\n neighbors.append(ins[i, right])\n below = p + IMAGE_WIDTH\n if below < (IMAGE_HEIGHT * IMAGE_WIDTH):\n neighbors.append(ins[i, below])\n\n this_pixel = ins[i, p]\n neighbor_pixels = np.mean(neighbors)\n\n baseline = min(this_pixel, neighbor_pixels)\n difference = abs(this_pixel - neighbor_pixels)\n\n if difference == 0.0:\n # this pixel and its neighbors are in equillibrium, can't bleed\n continue\n\n for m in range(multiple):\n new_ins[(ins.shape[0] * (m + 1)) + i, p] = np.random.uniform(baseline, baseline + difference)\n\n print(new_ins.shape)\n print(new_outs.shape)\n\n return new_ins, new_outs",
"def _extend_previous_indexes(self):\n for key, interval_arr in self.intervals.items():\n for idx, interval in enumerate(interval_arr):\n\n min_val = np.min(interval)\n\n if (idx == 0) and (np.min(interval) - self.num_previous_indexes < 0):\n min_val = 0\n elif (idx > 0) and (\n (np.min(interval) - self.num_previous_indexes)\n <= np.max(interval_arr[idx - 1])\n ):\n min_val = np.max(interval_arr[idx - 1]) + 1\n else:\n min_val = np.min(interval) - self.num_previous_indexes\n\n self.intervals[key][idx] = [min_val, np.max(interval)]",
"def updateGEP(partition_name,partition_values,partition_dim,dataType,partitioned_dim):\n for idx,instr in enumerate(ir):\n if \"@\"+partition_name+\",\" in instr and \"getelementptr\"in instr:\n\n # Get the current which indexes of this array are being used in this instruction\n current_indexes=re.findall('\\i64 (.*?)[,\\s]', instr)[1:]\n \n # Now lets try to find out the subarray that this index belongs to\n for part_idx,_ in enumerate(partition_values):\n if current_indexes[partitioned_dim].isdigit():\n if int(current_indexes[partitioned_dim]) in partition_values[part_idx]:\n subarray_idx=part_idx\n break\n else:\n print(\"\\tUnable to partition \"+partition_name+\" - Please unroll it first\")\n shutil.copyfile(sys.argv[1],sys.argv[2])\n exit()\n\n # replace the array name\n ir[idx]=ir[idx].replace(\"@\"+partition_name+\",\",\"@\"+partition_name+\"_sub\"+str(subarray_idx)+\",\")\n\n # replace array dimension\n dim_text=generateDimText(partition_dim[subarray_idx],dataType)\n ir[idx]=ir[idx][:ir[idx].find(\"[\")-1]+dim_text+ir[idx][ir[idx].find(\"*\"):]\n\n # replace array index\n new_indexes=\" i64 0\"\n for i in range(len(partition_dim[0])):\n if i == partitioned_dim:\n for p in partition_values:\n if int(current_indexes[i]) in p:\n current_partition=p[:]\n break\n new_indexes=new_indexes+\", i64 \"+str(current_partition.index(int(current_indexes[i]))) \n else:\n new_indexes=new_indexes+\", i64 \"+current_indexes[i].replace(',','')\n ir[idx]=re.findall('(.*@\\S*,)',ir[idx])[0] + new_indexes +\"\\n\"",
"def add_area_boundary_transects(self,exclude='dummy'):\n areas=[a[0] for a in self.monitor_areas]\n if exclude is not None:\n areas=[a for a in areas if not re.match(exclude,a)]\n\n mon_areas=dict(self.monitor_areas)\n\n seg_to_area=np.zeros(self.hydro.n_seg,'i4')-1\n\n for idx,name in enumerate(areas):\n # make sure of no overlap:\n assert np.all( seg_to_area[ mon_areas[name] ] == -1 )\n # and label to this area:\n seg_to_area[ mon_areas[name] ] = idx\n\n poi0=self.hydro.pointers - 1\n\n exch_areas=seg_to_area[poi0[:,:2]]\n # fix up negatives in poi0\n exch_areas[ poi0[:,:2]<0 ] = -1\n\n # convert to tuples so we can get unique pairs\n exch_areas_tupes=set( [ tuple(x) for x in exch_areas if x[0]!=x[1] and x[0]>=0 ] )\n # make the order canonical \n canon=set()\n for a,b in exch_areas_tupes:\n if a>b:\n a,b=b,a\n canon.add( (a,b) )\n canon=list(canon) # re-assert order\n\n names=[]\n exch1s=[]\n\n for a,b in canon:\n self.log.info(\"%s <-> %s\"%(areas[a],areas[b]))\n name=areas[a][:9] + \"__\" + areas[b][:9]\n self.log.info(\" name: %s\"%name)\n names.append(name)\n\n fwd=np.nonzero( (exch_areas[:,0]==a) & (exch_areas[:,1]==b) )[0]\n rev=np.nonzero( (exch_areas[:,1]==a) & (exch_areas[:,0]==b) )[0]\n exch1s.append( np.concatenate( (fwd+1, -(rev+1)) ) )\n self.log.info(\" exchange count: %d\"%len(exch1s[-1]))\n\n # and add to transects:\n transects=tuple(zip(names,exch1s))\n self.monitor_transects=self.monitor_transects + transects",
"def find_Global_IEN_indeces(self,shared_patch_num,boundary_dir,boundary_end):\r\n \r\n \"\"\"\r\n 2D case\r\n \"\"\"\r\n patch_local_IEN=self.local_IEN(shared_patch_num)\r\n patch_local_IEN_2D=self.local_IEN(shared_patch_num,1)\r\n \r\n \"\"\"\r\n Number of bases in principle directions along sharing patch\r\n \"\"\"\r\n num_basis_xi=self.num_bases[shared_patch_num,0]\r\n num_basis_eta=self.num_bases[shared_patch_num,1]\r\n \r\n \"\"\"\r\n Patches are 1-1 along boundaries\r\n \"\"\"\r\n if boundary_dir==0 and boundary_end==0:\r\n shared_bases=patch_local_IEN_2D[0,:]\r\n \r\n elif boundary_dir==0 and boundary_end==1:\r\n shared_bases=patch_local_IEN_2D[num_basis_eta-1,:]\r\n \r\n elif boundary_dir==1 and boundary_end==0:\r\n shared_bases=patch_local_IEN_2D[:,0]\r\n \r\n elif boundary_dir==1 and boundary_end==1:\r\n shared_bases=patch_local_IEN_2D[:,num_basis_xi-1] \r\n \r\n \"\"\"\r\n Outputs indeces of shared bases\r\n \"\"\"\r\n for i in np.arange(len(shared_bases)):\r\n if i==0:\r\n row_index=np.where(patch_local_IEN==shared_bases[i])[0][0] #Specifes only first instance to avoid repeated indeces\r\n col_index=np.where(patch_local_IEN==shared_bases[i])[1][0]\r\n Global_IEN_indeces=np.vstack((row_index,col_index))\r\n else:\r\n row_index=np.where(patch_local_IEN==shared_bases[i])[0][0]\r\n col_index=np.where(patch_local_IEN==shared_bases[i])[1][0]\r\n Global_IEN_indeces_i=np.vstack((row_index,col_index))\r\n Global_IEN_indeces=np.hstack((Global_IEN_indeces,Global_IEN_indeces_i))\r\n \r\n #row index on top, column index on bottom\r\n return Global_IEN_indeces.astype(int)",
"def _coordinate_offset(anchors: np.ndarray, out_hw: np.ndarray) -> np.array:\n grid = []\n for l in range(len(anchors)):\n grid_y = np.tile(np.reshape(np.arange(0, stop=out_hw[l][0]), [-1, 1, 1, 1]), [1, out_hw[l][1], 1, 1])\n grid_x = np.tile(np.reshape(np.arange(0, stop=out_hw[l][1]), [1, -1, 1, 1]), [out_hw[l][0], 1, 1, 1])\n grid.append(np.concatenate([grid_x, grid_y], axis=-1))\n return np.array(grid)",
"def keep_inside(anchors, img_info):\n with cuda.get_device_from_array(anchors) as d:\n xp = cuda.get_array_module(anchors)\n if d.id >= 0:\n img_info = cuda.to_gpu(img_info, d)\n assert anchors.device == img_info.device\n\n inds_inside = xp.where(\n (anchors[:, 0] >= 0) &\n (anchors[:, 1] >= 0) &\n (anchors[:, 2] < img_info[1]) & # width\n (anchors[:, 3] < img_info[0]) # height\n )[0]\n return inds_inside, anchors[inds_inside]",
"def extract_bed_coordinates_block_format(input_bed, output_exons_bed, output_introns_bed):\n\n # set up dictionary to hold coordinates\n exon_list = collections.defaultdict(lambda: collections.defaultdict())\n intron_list = collections.defaultdict(lambda: collections.defaultdict())\n # read in data\n data = gen.read_many_fields(input_bed, \"\\t\")\n\n with open(output_exons_bed, \"w\") as output_exons:\n with open(output_introns_bed, \"w\") as output_introns:\n for line in data:\n start = int(line[1])\n id = line[3]\n strand = line[5]\n block_sizes = [int(i) for i in line[10].split(\",\") if len(i)]\n start_indices = [int(i) for i in line[11].split(\",\") if len(i)]\n # if on the reverse strand, need to reverse order\n if strand == \"-\":\n block_sizes = block_sizes[::-1]\n start_indices = start_indices[::-1]\n # now get a list of exon ids to use for intron calculations\n exon_ids = list(range(len(start_indices)))\n\n for i in range(len(start_indices)):\n # now get the start and end of the exon coordinates\n start_index = start + start_indices[i]\n end_index = start_index + block_sizes[i]\n # get the exon id\n exon_id = i+1\n # now write to the exons file\n output_exons.write(\"{0}\\t{1}\\t{2}\\t{3}.{4}\\t.\\t{5}\\n\".format(line[0], start_index, end_index, id, exon_id, strand))\n\n if i+1 in exon_ids:\n intron_id = \"{0}-{1}\".format(i+1, i+2)\n if strand == \"-\":\n intron_start = start + start_indices[i+1] + block_sizes[i+1]\n intron_end = start_index\n else:\n intron_start = end_index\n intron_end = start + start_indices[i+1]\n output_introns.write(\"{0}\\t{1}\\t{2}\\t{3}.{4}\\t.\\t{5}\\n\".format(line[0], intron_start, intron_end, id, intron_id, strand))",
"def _merge(arr, aux, lo, mid, high):\n\n aux[lo:high] = arr[lo:high]\n left_indx = lo\n right_indx = mid\n for k in range(lo, high):\n if left_indx == mid:\n arr[k] = aux[right_indx]\n right_indx += 1\n elif right_indx == high:\n arr[k] = aux[left_indx]\n left_indx += 1\n elif aux[left_indx] <= aux[right_indx]:\n arr[k] = aux[left_indx]\n left_indx += 1\n else:\n arr[k] = aux[right_indx]\n right_indx += 1\n\n return",
"def test_06_01_segmented_to_ijv(self):\n x = cellprofiler_core.object.Objects()\n numpy.random.seed(61)\n labels = numpy.random.randint(0, 10, size=(20, 20))\n x.segmented = labels\n ijv = x.get_ijv()\n new_labels = numpy.zeros(labels.shape, int)\n new_labels[ijv[:, 0], ijv[:, 1]] = ijv[:, 2]\n assert numpy.all(labels == new_labels)",
"def remove_lakes(maskin):\n\n import numpy as np\n\n # Sweep grid and identify each individual basin.\n max_nbr_of_basins = 500 \n # memory error in python if >580 on local machine\n print ('Max number of basins: ', max_nbr_of_basins)\n\n maskin[0,:] = 0\n maskin[:,0] = 0\n maskin[-1,:] = 0\n maskin[:,-1] = 0\n\n [lm,mm] = np.shape(maskin)\n lm=lm-2\n mm=mm-2\n\n tmp = np.zeros((lm + 4, mm + 4))\n tmp[1:-1, 1:-1] = maskin\n maskin = tmp \n\n ## do not forget to remove margins at the end! ##\n\n wet = np.zeros(np.shape(maskin))\n Relations = np.zeros((max_nbr_of_basins,lm*mm))\n Relations1= np.zeros((max_nbr_of_basins,lm*mm)) # needed if >max basins\n cells_to_add = np.zeros((lm, mm))\n\n # Initialization.\n NoBasin = 0\n NoBasin1 = 0\n\n wet[maskin > 0] = 1\n n=0 \n for j in range(mm): # inner lon\n if j%100==0:\n print (j)\n for i in range(lm): # inner lat\n # Must use ic,jc in place of i,j (inner domain).\n ic = i + 2\n jc = j + 2\n\n # If a cell is wet and if it is not related to a basin already identified,\n # then you just discovered a new basin.\n if ( wet[ic, jc] and (~np.any(Relations[:, lm*j+i])) \n and (~np.any(Relations1[:, lm*j+i])) \n and (NoBasin < max_nbr_of_basins)):\n NoBasin = NoBasin + 1 \n\n if (NoBasin == max_nbr_of_basins):\n print ('Too many basins, move to overflow array...')\n Relations1 = np.copy(Relations)\n Relations = np.zeros((max_nbr_of_basins,lm*mm))\n NoBasin1 = NoBasin\n NoBasin = 1\n n = n+1\n print ('n = ', n)\n\n if (NoBasin < max_nbr_of_basins):\n if NoBasin%100==0:\n print (NoBasin, ' basins... ')\n\n # The cell (i,j) must be added to the basin # NoBasin.\n cells_to_add[:] = 0\n cells_to_add[i, j] = 1 \n\n # So you have discovered a new basin. Now, you must define the extent\n # by sweeping the grid and adding all cells that are part of this basin.\n\n not_done = 1\n while not_done: \n \n # If no more cells to add/basin fully explored, look for a new basin.\n if ( ~np.any(cells_to_add) ):\n not_done = 0\n break\n\n # inner_j_loop: for ix2 = inner lon dimension\n for ix2 in range(mm):\n \n if np.any( cells_to_add[:, ix2] ): # To accelerate sweep\n \n # inner_i_loop: for ix1 = 1, lm\n for ix1 in range(lm):\n\n if ( cells_to_add[ix1, ix2] ):\n\n Relations[NoBasin, lm*ix2+ix1] = 1\n cells_to_add[ix1, ix2] = 0\n\n # Look for adjacent wet cells that are not related elsewhere\n # NB. wet array covers whole domain, not just inner domain (+2)\n if (wet[ix1 + 3, ix2 + 2]):\n if (Relations[NoBasin, lm*ix2 + (ix1+1)]==0):\n cells_to_add[ix1 + 1, ix2] = 1\n\n if (wet[ix1 + 1, ix2 + 2]):\n if (Relations[NoBasin, lm *ix2 + (ix1 - 1)]==0):\n cells_to_add[ix1 - 1, ix2 ] = 1\n\n if ( wet[ix1 + 2, ix2 + 3] ):\n if (Relations[NoBasin, lm *(ix2 + 1) + ix1]==0):\n cells_to_add[ix1 , ix2 + 1] = 1\n\n if ( wet[ix1 + 2, ix2 + 1] ):\n if (Relations[NoBasin, lm*(ix2 - 1) + ix1]==0):\n cells_to_add[ix1 , ix2 - 1] = 1\n\n\n # Find the main basin (the one with the largest area).\n print ('Finished exploring the grid...' )\n AreaMainBasin = 0\n \n for i in range(max_nbr_of_basins):\n if (np.sum(Relations[i,:]) > AreaMainBasin ):\n AreaMainBasin = np.sum(Relations[i,:])\n if NoBasin1:\n if (np.sum(Relations1[i,:]) > AreaMainBasin ):\n AreaMainBasin = np.sum(Relations1[i,:]) \n\n if NoBasin1:\n NoBasin=NoBasin+NoBasin1 \n\n print ('There are ', NoBasin, ' different basins over the grid.')\n print ('Area of main basin = ', AreaMainBasin, ' cells.' 
)\n \n # Consider all other basins as `lakes', and fill them.\n for i in range(max_nbr_of_basins):\n if (np.sum(Relations[i, :]) < AreaMainBasin):\n j = np.squeeze(np.where( Relations[i, :] == 1 ))\n if ( np.size(j) > 0 ):\n i_h = j - lm * np.floor(j/ lm)\n j_h = np.floor(j/lm)\n maskin[(i_h.astype(int) + 2), (j_h.astype(int) + 2)] = 0\n\n # Check additional array if max basins exceeded \n if NoBasin1:\n for i in range(max_nbr_of_basins):\n if (np.sum(Relations1[i, :]) < AreaMainBasin):\n j = np.squeeze(np.where( Relations1[i, :] == 1 ))\n if ( np.size(j) > 0 ):\n i_h = j - lm * np.floor(j/ lm)\n j_h = np.floor(j/lm)\n maskin[(i_h.astype(int) + 2), (j_h.astype(int) + 2)] = 0\n \n # Remove bry margins\n maskout = maskin[1:-1,1:-1]\n maskout[0,:] = maskout[1,:]\n maskout[:,0] = maskout[:,1]\n maskout[-1,:] = maskout[-2,:]\n maskout[:,-1] = maskout[:,-2]\n return(maskout)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
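As a self-contained illustration of the neighbor-counting step above, the helper below reproduces the same logic on a plain 2D list and shows the effect on a tiny board; it is an adaptation for demonstration, not part of the original class.

```python
def populate_with_numbers(raw_array):
    """Increment every non-insect cell once for each adjacent insect (-1) cell."""
    height, width = len(raw_array), len(raw_array[0])
    for y in range(height):
        for x in range(width):
            if raw_array[y][x] == -1:
                for i in range(-1, 2):
                    for j in range(-1, 2):
                        ny, nx = y + i, x + j
                        # stay in bounds and skip insect cells
                        if 0 <= ny < height and 0 <= nx < width and raw_array[ny][nx] != -1:
                            raw_array[ny][nx] += 1
    return raw_array


# One insect in the corner: its three in-bounds neighbors each get a count of 1.
print(populate_with_numbers([[-1, 0, 0], [0, 0, 0], [0, 0, 0]]))
# [[-1, 1, 0], [1, 1, 0], [0, 0, 0]]
```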
Inserting a canvas into each tile based on the final raw_array data.
|
def addCanvasToTiles(self, raw_array, scaling):
    self.tile_array = []
    self.insects_left = 0
    for y in range(self.height):
        tmp = []
        # generator returning random insect name from insect_list used for inserting appropriate canvas
        insect_generator = (random.choice(self.insect_list) for _ in iter(int, 1))
        for x, insect in zip(range(self.width), insect_generator):
            if raw_array[y][x] == -1:
                tile = Tile(self.frame, y, x, insect, self.asset_data, scaling)
                self.insects_left += 1
            elif raw_array[y][x] == 0:
                tile = Tile(self.frame, y, x, "tile_clear", self.asset_data, scaling)
            else:
                tile = Tile(self.frame, y, x, str(raw_array[y][x]), self.asset_data, scaling)
            tmp.append(tile)
        self.tile_array.append(tmp)
|
[
"def tile_canvas(self):\r\n if not self._tile_canvas:\r\n\r\n # make blank tile_canvas\r\n self._tile_canvas = Image.new(\"RGBA\", (\r\n (np.ptp(self._X) + 1) * TILE_SIZE,\r\n (np.ptp(self._Y) + 1) * TILE_SIZE)) # (x,y) peak to peak = number of tiles * TILE_SIZE\r\n logger.debug(f\"tile_canvas size:{self._tile_canvas.size}\")\r\n\r\n # paint tile_canvas from tiles\r\n for tile in self.tiles:\r\n px_x = (tile.x - min(self._X)) * TILE_SIZE\r\n px_y = (tile.y - min(self._Y)) * TILE_SIZE\r\n self._tile_canvas.paste(tile.img, (px_x, px_y))\r\n\r\n return self._tile_canvas",
"def _render_tiles(self, tiles, wslice, hslice):\n\n for row in tiles:\n for atile in row:\n basex = wslice*atile.x\n basey = hslice*atile.y\n if atile.visited is True:\n self.gamemap.create_rectangle(basex, basey, basex+wslice, basey+hslice, fill=atile.bg)\n else:\n self.gamemap.create_rectangle(basex, basey, basex+wslice, basey+hslice, fill=\"black\")",
"def make_tiles(self):\n num_tiles = self._puzzle_height * self._puzzle_width\n #subsurface is a ract(left, top, width, height\n \n for idx in xrange(num_tiles):\n self._tiles.append(self._tiles_sprite.subsurface(\n (idx * TILE_SIZE, 0, TILE_SIZE, TILE_SIZE)))",
"def init_tiles(self):\n\t\tfor y in range(self.height):\n\t\t\tself.tiles.append([])\n\t\t\tfor x in range(self.width):\n\t\t\t\tnext_tile = Tile(self, x, y) #TODO: change if tiles get args\n\t\t\t\tself.tiles[y].append(next_tile)\n\t\t\t\tnext_tile.update()",
"def populateWithNumbers(self, raw_array, scaling):\n for y in range(self.height):\n for x in range(self.width):\n if raw_array[y][x] == -1:\n for i in range(-1, 2):\n for j in range(-1, 2):\n # checking bounds\n if 0 <= y + i < self.height and 0 <= x + j < self.width:\n # checking if not insect\n if raw_array[y + i][x + j] != -1:\n raw_array[y + i][x + j] += 1\n\n self.addCanvasToTiles(raw_array, scaling)",
"def make_final_display(self):\n\n #Title of the search project\n screen.title(\"Sites By MOLA Gray STD And PTP {} Rect\".format(self.name))\n\n #Draw final rects on a colored map\n img_color_rects = self.draw_filtered_rects(IMG_COLOR, self.high_graded_rects)\n\n #To post the new img on tkinter canvas, first convert to a RGB format, then to a compatible photo image\n img_converted = cv.cvtColor(img_color_rects, cv.COLOR_BGR2RGB) #Returns a NumPy array\n\n #Convert array into a photo image for tkinter\n img_converted = ImageTk.PhotoImage(Image.fromarray(img_converted))\n\n #Place image into canvas (coords for upper left corners of canvas (0, 0), converted img, north west anchor direction)\n canvas.create_image(0, 0, image=img_converted, anchor=tk.NW)\n\n #Add summary text for every rect\n #Coords for the bottom left corner of the first txt object\n txt_x = 5\n txt_y = IMG_HT + 20\n for k in self.high_graded_rects:\n #Place txt on canvas (coords (txt_x, txt_y), left justified anchor direction, default font, txt str)\n canvas.create_text(txt_x, txt_y, anchor=\"w\", font=None,\n text=\"rect={} mean elev={:.1f} std={:.2f} ptp={}\"\n .format(k, self.rect_means[k], self.rect_stds[k], self.rect_ptps[k]))\n\n #Increment txt box y coords after drawing each txt object\n txt_y += 15\n\n #Check if the text greater than the bottom canvas. \n if txt_y >= int(canvas.cget(\"height\")) - 10:\n #Make new column shifting x coords by 300 and reset y coords by img heihgt + 20\n txt_x += 300\n txt_y = IMG_HT + 20\n\n canvas.pack() #Packing optimizes the placement of objects in the canvas\n \n canvas.postscript(file=\"images/final_mola_stats.ps\", colormode=\"color\")\n\n screen.mainloop() #mainloop() is an infinite loop that runs tkinter, waits for an event to happen, then processes said event until the window is closed",
"def __initTiles(self):\n for m in range(self.amountVertical):\n for n in range(self.amountHorizontal):\n tile = self.themeFactory.createThemeElement(self.mapfile[m][n])\n tile.setCoordinates(m, n)\n tile.number = (m * self.amountHorizontal) + n\n self.tiles.append(tile)\n self.sprites.add(tile)",
"def visualize_data(self):\r\n\r\n\t\tx = np.arange(self.tile_width)\r\n\t\ty = np.arange(self.tile_height)\r\n\t\thovertext = []\r\n\r\n\t\tcounts_list = np.zeros(len(self.data_list))\r\n\t\tfiles_list = np.empty(len(self.data_list), dtype = 'object')\r\n\r\n\t\tfor index, item in enumerate(self.data_list):\r\n\t\t\tcounts_list[index] = item[1]\r\n\t\t\tfiles_list[index] = item[0]\r\n\r\n\t\tcounts_list = counts_list.reshape((self.tile_width, self.tile_height))\r\n\t\tfiles_list = files_list.reshape((self.tile_width, self.tile_height))\r\n\r\n\t\tfor yi, yy in enumerate(y):\r\n\t\t\thovertext.append(list())\r\n\t\t\tfor xi, xx in enumerate(x):\r\n\t\t\t\thovertext[-1].append('File name : {}<br />Count: {}'.format(np.flipud(files_list)[self.tile_height -1 -yi][xi], np.flipud(counts_list)[self.tile_height - 1 - yi][xi]))\r\n\r\n\t\ttrace = go.Heatmap(z = counts_list, x = x, y = y, hoverinfo = 'text', text = hovertext, colorscale = 'Greys')\r\n\t\tdata = [trace]\r\n\t\tpy_offline.plot(data, filename = './tiles/tile'+str(self.tile_number)+'.html', auto_open = False)",
"def clear(self):\n \n self.canvas = [\n [\n Tile(\n states=list(range(len(self.patterns))) # indices of all the patterns\n ) \n for j in range(self.width)\n ]\n for i in range(self.height)\n ]",
"def new(self):\n self.all_sprites = pygame.sprite.LayeredUpdates()\n self.walls = pygame.sprite.Group()\n self.holes = pygame.sprite.Group()\n self.decelerations = pygame.sprite.Group()\n self.holdbacks = pygame.sprite.Group()\n self.viruses_shoot = pygame.sprite.Group()\n self.viruses_move = pygame.sprite.Group()\n self.shooting = pygame.sprite.Group()\n self.items = pygame.sprite.Group()\n self.map = Map(path.join(self.map_folder, 'new_tilemap.tmx'))\n self.map_img = self.map.make_map()\n self.map_rect = self.map_img.get_rect()\n self.dark = True\n for tile_object in self.map.tmxdata.objects:\n obj_centerx = tile_object.x + tile_object.width / 2\n obj_centery = tile_object.y + tile_object.height / 2\n if tile_object.name == 'player':\n if self.role1_col == YELLOW:\n self.player = Player(self, obj_centerx, obj_centery, 'role1')\n else:\n self.player = Player(self, obj_centerx, obj_centery, 'role2')\n if tile_object.name == 'wall':\n Wall(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n if tile_object.name == 'hole':\n Hole(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n if tile_object.name == 'deceleration':\n Deceleration(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n if tile_object.name == 'holdback':\n Holdback(self, tile_object.x, tile_object.y)\n if tile_object.name == 'virus_shoot':\n Virus(self, obj_centerx, obj_centery, 'shoot')\n if tile_object.name == 'virus_movex':\n Virus(self, obj_centerx, obj_centery, 'move_x')\n if tile_object.name == 'virus_movey':\n Virus(self, obj_centerx, obj_centery, 'move_y')\n if tile_object.name in ['treatment', 'key', 'light']:\n Item(self, obj_centerx, obj_centery, tile_object.name)\n self.camera = Camera(self.map.width, self.map.height)",
"def capture(self):\n # insert the canvas\n self.fitsimage.add(self.canvas, tag='mycanvas')",
"def draw_pieces(self):\n for i in range(8):\n for j in range(8):\n if self.get_board_array()[i, j].get_content() is not None:\n self.screen.blit(\n self.get_board_array()[i, j].get_content().get_visual(),\n (int(j * self.h / 8), int(i * self.h / 8))\n )",
"def place_initial_tile(self):\n self.create_list_tiles()\n add_dist = True\n x_tile = (self.FIRST_TILE)\n y_tile = (self.FIRST_TILE)\n color = self.WHITE\n tile = Tile(x_tile, y_tile, self.CHAR_WIDTH, color)\n self.list_tiles[self.HALF_SQUARES-1][self.HALF_SQUARES-1] = tile\n tile.draw_tile(self.x_add_disc, self.y_add_disc, add_dist)\n\n x_tile = (self.FIRST_TILE+self.CELL_WIDTH)\n y_tile = (self.FIRST_TILE)\n color = self.BLACK\n tile = Tile(x_tile, y_tile, self.CHAR_WIDTH, color)\n self.list_tiles[self.HALF_SQUARES-1][self.HALF_SQUARES] = tile\n tile.draw_tile(self.x_add_disc, self.y_add_disc, add_dist)\n\n x_tile = (self.FIRST_TILE)\n y_tile = (self.FIRST_TILE+self.CELL_WIDTH)\n color = self.BLACK\n tile = Tile(x_tile, y_tile, self.CHAR_WIDTH, color)\n self.list_tiles[self.HALF_SQUARES][self.HALF_SQUARES-1] = tile\n tile.draw_tile(self.x_add_disc, self.y_add_disc, add_dist)\n\n x_tile = (self.FIRST_TILE+self.CELL_WIDTH)\n y_tile = (self.FIRST_TILE+self.CELL_WIDTH)\n color = self.WHITE\n tile = Tile(x_tile, y_tile, self.CHAR_WIDTH, color)\n self.list_tiles[self.HALF_SQUARES][self.HALF_SQUARES] = tile\n tile.draw_tile(self.x_add_disc, self.y_add_disc, add_dist)",
"def create_individual_building_raster(self):\n canvas = np.zeros((self.max_y - self.min_y + 1,\n self.max_x - self.min_x + 1))\n for point in self.points:\n canvas[point[1] - self.min_y, point[0] - self.min_x] = 1\n return canvas",
"def draw_tile_backgrounds(self, tiles):\n\n def process_tile(tile):\n h = tile.height\n h_index = (h - self.parent.min_height) / (self.parent.max_height - self.parent.min_height)\n\n rgb_rand_1 = random.randint(0, self.ocean_noise)\n\n height_rgb = [0, 0, 0]\n height_rgb[0] = self.height_rgb_low[0] + h_index * (self.height_rgb_high[0] - self.height_rgb_low[0])\n height_rgb[1] = self.height_rgb_low[1] + h_index * (self.height_rgb_high[1] - self.height_rgb_low[1])\n height_rgb[2] = self.height_rgb_low[2] + h_index * (self.height_rgb_high[2] - self.height_rgb_low[2])\n\n water_rgb = (rgb_rand_1, rgb_rand_1, 255)\n if self.screen_mode == \"dark\":\n water_rgb = (rgb_rand_1 // 2, rgb_rand_1 // 2, 150)\n if self.screen_mode == \"martin\":\n water_rgb = (195 + rgb_rand_1 * 0.5, 234 + rgb_rand_1 * 0.5, 251)\n\n fillColors = [\n height_rgb, # Ground\n height_rgb, # Rail\n self.road_tile_rgb, # Road\n height_rgb, # Town building\n height_rgb, # Trees\n self.station_rgb, # Stations\n water_rgb, # Water\n height_rgb, # Void\n self.industry_rgb, # Industries\n self.torb_rgb, # Tunnel/bridge\n height_rgb, # Objects\n ]\n fillColor = fillColors[tile.kind % len(fillColors)]\n if tile.kind == 1:\n rail = tile.occupant\n if rail.is_depot:\n fillColor = self.rail_depot_rgb\n\n if tile.kind == 5:\n station = tile.occupant\n if station.station_type == 0:\n fillColor = self.rail_station_rgb\n if station.station_type == 1:\n fillColor = self.airport_rgb\n if station.station_type == 2:\n fillColor = self.bus_station_rgb\n if station.station_type == 3:\n fillColor = self.truck_station_rgb\n if station.station_type == 4:\n fillColor = self.heliport_rgb\n if station.station_type == 5:\n fillColor = self.seaport_rgb\n\n self.draw_square(tile, fillColor)\n if tile.kind == 1:\n rail = tile.occupant\n if not rail.is_depot:\n self.draw_rail_background(tile)\n\n if self.parent.show_progress_bar:\n with alive_bar(len(tiles)) as abar:\n for tile in tiles:\n process_tile(tile)\n abar()\n else:\n for tile in tiles:\n process_tile(tile)",
"def initialize_tiles(self):\n for x in range(0, self.width, self.tile_size):\n for y in range(0, self.height, self.tile_size):\n self.group_tiles.add(Tile(x, y, self.tile_size))",
"def __init__(self):\n\n # Dimensions of the texture array.\n self.__width = 1024\n self.__height = 1024\n self.__depth = 20\n self.__scratch_depth = 2\n\n # Allocate the texture array.\n # NOTE: If this goes wrong, we're probably trying to do this before\n # the opengl context has been created, and things will go horribly\n # wrong later! For some reason glGetError() is returning 0 anyway.\n self.__texture = GL.glGenTextures(1)\n\n # Ok, initialise the texture.\n GL.glBindTexture(GL.GL_TEXTURE_2D_ARRAY, self.__texture)\n GL.glTexParameteri(GL.GL_TEXTURE_2D_ARRAY, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n GL.glTexParameteri(GL.GL_TEXTURE_2D_ARRAY, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)\n GL.glTexParameteri(GL.GL_TEXTURE_2D_ARRAY, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)\n GL.glTexParameteri(GL.GL_TEXTURE_2D_ARRAY, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)\n GL.glTexImage3D(\n GL.GL_TEXTURE_2D_ARRAY,\n 0, #level\n GL.GL_RGBA8, # internal format\n self.__width,\n self.__height,\n self.__depth + self.__scratch_depth,\n 0, #border\n GL.GL_RGBA, # format\n GL.GL_UNSIGNED_BYTE, # data type\n None # The data.\n )\n\n # We insert images one at a time, and keep track of the current\n # insertion point. When we reach the end of the row, the next\n # row starts at a y coordinate flush with the bottom of the tallest\n # item in the current row. Note that this will end up with lots of\n # wasted space, we don't do any work to optimise the packing!\n self.__cursor = TextureArray.Cursor()\n self.__cursor.end = self.__depth\n\n # Initialise the scratch cursor.\n self.__scratch_cursor = TextureArray.Cursor()\n self.__scratch_cursor.index = self.__depth\n self.__scratch_cursor.end = self.__depth + self.__scratch_depth\n\n # Map from filenames to virtual textures.\n self.__filename_map = {}",
"def fence(x, y, l, w, item):\r\n for a in range(x, l + x, 10):\r\n for b in range(y, w + y, 10):\r\n main_canvas.create_image(a, b, image=item, anchor=NW)",
"def im_split_z(data,dim,depth):\r\n\r\n\tn_tot = len(data[0][:,0]) #number of events\r\n\r\n\t#define lattice to insert energies into to creta images\r\n\tx_ref = np.linspace(x_min,x_max,dim)\r\n\ty_ref = np.linspace(y_min,y_max,dim)\r\n\tz_ref = np.linspace(z_min,z_max,depth)\r\n\r\n\th=0 #count events\r\n\tim_array = np.zeros(n_tot,dim,dim,depth)\r\n\r\n\twhile h<n_tot: #loop over events\r\n\r\n\t\tim_tab = np.zeros((dim,dim,depth)) #array to contain constructed image\r\n\r\n\t\t#remove empty hits\r\n\t\tind_nul = np.where(data[3][h,:]==0)[0]\r\n\t\tx_tab = data[0][h,:ind_nul]\r\n\t\ty_tab = data[1][h,:ind_nul]\r\n\t\tz_tab = data[2][h,:ind_nul]\r\n\t\te_tab = data[3][h,:ind_nul]\r\n\r\n\t\tfor i in range(len(e_tab)): #loop over hits within event\r\n\r\n\t\t\tx_coord = np.argmin(np.abs(x_ref-x_tab[i]))\r\n\t\t\ty_coord = np.argmin(np.abs(y_ref-y_tab[i]))\r\n\t\t\tz_coord = np.argmin(np.abs(z_ref-z_tab[i]))\r\n\r\n\t\t\tim_array[h,x_coord,y_coord,z_coord] += e_tab[i]\r\n\r\n\r\n\t\t#show a few plots of actual event vs events converted to picture\r\n\t\t\"\"\"\r\n\t\tif h<5:\r\n\t\t\tdata_actual = [x_tab,y_tab,z_tab,e_tab]\r\n\t\t\tdata_comp = im_array[h,:,:,:]\r\n\t\t\tplot_scatter(data_actual,data_comp,depth)\r\n\t\t\"\"\"\r\n\t\th+=1 #next event\r\n\r\n\t\tif h % 10000 == 0:\r\n\t\t\tprint('{}% completed'.format(h/n_tot*100))\r\n\r\n\treturn im_array"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
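The per-cell branch in addCanvasToTiles reduces to a small value-to-asset mapping; the standalone function below restates that choice for clarity, with the random insect pick taken as a parameter. It is illustrative only, and the asset names are taken directly from the snippet.

```python
import random


def cell_asset_name(cell_value, insect_list):
    """Return the asset name a Tile would be built with for a given raw_array cell."""
    if cell_value == -1:
        return random.choice(insect_list)  # insect cell
    if cell_value == 0:
        return "tile_clear"                # no neighboring insects
    return str(cell_value)                 # numbered cell, e.g. "1", "2", ...


print(cell_asset_name(2, ["ant", "beetle"]))  # "2"
```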
Binding events to every tile on the board.
|
def bindBoardEvents(self):
    assert self.tile_array
    for x in self.tile_array:
        for y in x:
            y.tile.bind("<Button-1>", lambda _, y=y: self.uncoverCheck(y))
            if DEBUG_FLAG:
                y.tile.bind("<Button-2>", lambda _, y=y: self.uncoverAll())
            else:
                y.tile.bind("<Button-2>", lambda _, y=y: self.flagTile(y))
            y.tile.bind("<Button-3>", lambda _, y=y: self.flagTile(y))
|
[
"def handleClick(self, event):\n print(str(event.x) + ' ' + str(event.y))\n for listener in self.listeners:\n listener.handleClick(self.coordsToGrid(event.x, event.y))",
"def _handle_tile_state_click(self, x, y, button):\n add = (button == sdl2.SDL_BUTTON_LEFT)\n tile = self._level.screen_coords_to_tile(vector.Vector(x, y))\n\n if tile:\n tile_coords = tile.coords\n height = tile.height + (1 if add else -1)\n colour = tile.colour\n else:\n tile_coords = self._level.screen_coords_to_tile_coords(\n vector.Vector(x, y))\n height = 1 if add else 0\n colour = self.colour\n\n if self._level.tile_coords_valid(tile_coords):\n index = self._level.tile_coords_to_array_index(tile_coords)\n if height > 0:\n self._level.tiles[index.y, index.x] = typingdefense.level.Tile(\n self._app,\n self._level.cam,\n tile_coords,\n height,\n colour)\n else:\n self._level.tiles[index.y, index.x] = None",
"def iter_board(self):\r\n\r\n\t\t# iter_live found in life_iteration_functions.py\r\n\t\tself.cells = iter_life(self.cells)",
"def handle_events(self, events, world):\n pass",
"def bind_game_events(self):\n self._game.on('reset', self._refresh_status)\n self._game.on('complete', self._drop_complete)\n self._game.on('connect', self._connect)\n self._game.on('undo', self._undo)",
"def event_handler(self):\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.done = True\n elif event.type == pg.KEYDOWN:\n self.toggle_show_fps(event.key)\n\n self.ui.state_events(self.state, event)\n\n self.state.get_event(event)",
"def loadTiles(self):\n self.tile = pygame.image.load(\"./hextile.png\").convert()\n self.tile.set_colorkey((0x80, 0x00, 0x80), RLEACCEL) \n\n self.cursor = pygame.image.load(\"./hexcursor.png\").convert()\n self.cursor.set_colorkey((0x80, 0x00, 0x80), RLEACCEL) \n self.cursorPos = self.cursor.get_rect()",
"def raise_all_sunken_cells(self) -> None:\r\n while self.sunken_cells:\r\n coord = self.sunken_cells.pop()\r\n if self._board[coord] == CellUnclicked():\r\n self.set_cell_image(coord, \"btn_up\")",
"def update_all(self, grid):\n for tile in self.group:\n tile.update()\n self.group.draw(grid.background)",
"def _tileClicked(self):\n tile = self.sender()\n\n row,col = tile.pos\n\n if tile.isChecked():\n self.row_select_field.setText(str(row))\n self.col_select_field.setText(str(col))\n\n global selected_row\n global selected_col\n old_row_select = selected_row\n old_col_select = selected_col\n selected_row = row\n selected_col = col\n\n # TODO: Deselect all other check boxes\n print(f\"Checked {row},{col}\")\n if (old_row_select is not None) and (old_col_select is not None):\n self.grid_layout.itemAtPosition(old_row_select,old_col_select).widget().setChecked(False)\n elif (row == selected_row) and (col == selected_col):\n pass\n else:\n # TODO: Checks if no other boxes are checked i.e. there shouldn't be a state when no box is checked.\n # If user unchecks a box, do something like not unchecking or checks box (0,0).\n print(f\"Unchecked {row},{col}\")",
"def update(self):\n for row in range(self._puzzle_height):\n for col in range(self._puzzle_width):\n tile_num = self._puzzle.get_number(row, col)\n self._screen.blit(self._tiles[tile_num],\n (col * TILE_SIZE + BORDER_SIZE,\n row * TILE_SIZE + BORDER_SIZE))",
"def ProcessUiEvent(self, event):\n self.board.ProcessUiEvent(event)\n self.wheel.ProcessUiEvent(event)\n self.scoreboard.ProcessUiEvent(event)",
"def drawBoard(self):\n \n self.button_Font = (\"Arial\", 68, \"bold\")\n self.button_List = []\n \n for boxes in range(9):\n self.button_List.append(tkinter.Button(self.main_window, text = \"\",\n font = self.button_Font, bg = \"black\", fg = \"white\", width = 3, height = 1,\n command = lambda pos = boxes: self.boxPressed(pos)))\n index = 0\n for r in range(3):\n for col in range(3):\n self.button_List[index].grid(row = r, column = col)\n index += 1",
"def init_tiles(self):\n\t\tfor y in range(self.height):\n\t\t\tself.tiles.append([])\n\t\t\tfor x in range(self.width):\n\t\t\t\tnext_tile = Tile(self, x, y) #TODO: change if tiles get args\n\t\t\t\tself.tiles[y].append(next_tile)\n\t\t\t\tnext_tile.update()",
"def explore_adjacent_tiles(self, tiles):\n center_x, center_y = (self.rect.left + 1)/32, (self.rect.top + 1)/32\n width, height = len(tiles[0]), len(tiles) \n x1, y1 = center_x - 2, center_y - 2\n x2, y2 = center_x + 2, center_y + 3\n for y in xrange( y1, y2 ):\n if( 0 <= y < height ):\n for x in xrange( x1, x2 ):\n if( 0 <= x < width ):\n tiles[y][x].map()",
"def onboard(self) -> None:",
"def bind_shortcuts(self):\n super().bind_shortcuts()\n self.root.bind(\"<s>\", lambda event: self.solve())\n self.root.bind(\"1\", lambda event: self.flag_obvious_cells())\n self.root.bind(\"2\", lambda event: self.double_left_click_obvious_cells())\n self.root.bind(\"3\", lambda event: self.find_last_bomb())",
"def bind_clicks(self):\n # call back function, e: event\n self.bind(\"<Button-1>\", lambda e: self._handle_left_click((e.x, e.y)))\n self.bind(\"<Button-2>\", lambda e: self._handle_right_click((e.x, e.y)))\n self.bind(\"<Button-3>\", lambda e: self._handle_right_click((e.x, e.y)))",
"def __click_event(self, event):\n\n if self.board is None:\n return\n\n largeur = self.canvas.winfo_width()\n hauteur = self.canvas.winfo_height()\n\n colomne_space = largeur / self.board.width\n ligne_space = hauteur / self.board.height\n\n # on recupaire le position dans la grille\n grid_pos_x = floor(event.x / colomne_space)\n grid_pos_y = floor(event.y / ligne_space)\n try:\n # Si on a fait un click gauche et que on a choisi de placer un joueur\n if self.select_set.get() == 1:\n print(\"player\")\n self.delete_shape_board(self.board.player_pos[0], self.board.player_pos[1])\n self.board.mat[self.board.player_pos[0]][self.board.player_pos[1]] = \\\n Case(Case.VIDE, self.board.recompence[Board.VIDE])\n\n self.delete_shape_board(grid_pos_y, grid_pos_x)\n self.board.mat[grid_pos_y][grid_pos_x] = Case(Case.START)\n self.board.player_pos[0] = grid_pos_y\n self.board.player_pos[1] = grid_pos_x\n self.draw_player(grid_pos_y, grid_pos_x)\n\n # Si on a fait un click gauche et que on a choisi de placer la cible\n elif self.select_set.get() == 2:\n print(\"target\")\n self.delete_shape_board(self.board.target_pos[0], self.board.target_pos[1])\n self.board.mat[self.board.target_pos[0]][self.board.target_pos[1]] = \\\n Case(Case.VIDE, self.board.recompence[Board.VIDE])\n\n self.delete_shape_board(grid_pos_y, grid_pos_x)\n self.board.mat[grid_pos_y][grid_pos_x] = Case(Case.FIN, self.board.recompence[Board.FIN])\n self.board.target_pos[0] = grid_pos_y\n self.board.target_pos[1] = grid_pos_x\n self.draw_target(grid_pos_y, grid_pos_x)\n\n elif self.select_set.get() == 3:\n print(\"Obstacle\")\n self.delete_shape_board(grid_pos_y, grid_pos_x)\n self.board.mat[grid_pos_y][grid_pos_x] = Case(Case.OBSTACLE)\n self.draw_obstacle(grid_pos_y, grid_pos_x)\n\n elif self.select_set.get() == 4:\n print(\"Danger\")\n self.delete_shape_board(grid_pos_y, grid_pos_x)\n self.board.mat[grid_pos_y][grid_pos_x] = Case(Case.DANGER, self.board.recompence[Board.DANGER])\n self.draw_danger(grid_pos_y, grid_pos_x)\n except IndexError:\n print(\"Error index\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
checking if there are any adjacent clear tiles and uncovering them
|
def uncoverClear(self, tile, insect_check=False):
stack = [tile]
visited = set()
while len(stack) > 0:
tmp_tile = stack.pop()
if tmp_tile.tile_name == "tile_clear" and tmp_tile not in visited:
for i in range(-1, 2):
for j in range(-1, 2):
if 0 <= tmp_tile.y + i < self.height and 0 <= tmp_tile.x + j < self.width and not (
i == 0 and j == 0):
stack.append(self.tile_array[tmp_tile.y + i][tmp_tile.x + j])
visited.add(tmp_tile)
if tmp_tile.status == "flagged":
self.insect_count += 1
if insect_check:
# checking if the tile uncovered is an insect
if tmp_tile.tile_name in self.insect_list:
self.loseGame()
tmp_tile.uncover()
tmp_tile.updateUI()
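The loop above is an iterative flood fill: a stack-based DFS where only clear tiles push their 8 neighbours. A standalone sketch of the same traversal on a plain boolean grid (the names here are hypothetical and only mirror the logic, not the game's tile classes):

def flood_uncover(grid, start_y, start_x):
    # grid[y][x] is True for a "clear" cell; returns every cell that gets uncovered.
    height, width = len(grid), len(grid[0])
    stack = [(start_y, start_x)]
    visited = set()
    uncovered = []
    while stack:
        y, x = stack.pop()
        if (y, x) in visited:
            continue
        visited.add((y, x))
        uncovered.append((y, x))
        # Only clear cells spread the uncovering to their 8 neighbours.
        if grid[y][x]:
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    ny, nx = y + dy, x + dx
                    if (dy, dx) != (0, 0) and 0 <= ny < height and 0 <= nx < width:
                        stack.append((ny, nx))
    return uncovered

if __name__ == "__main__":
    clear = [[True, True, False],
             [True, False, False],
             [False, False, False]]
    print(flood_uncover(clear, 0, 0))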
|
[
"def find_clearing_to_land():\n # Find a place on the lower half of the screen where there is no identifiable objects\n # Move closer... check again... repeat till height is near 0\n # land and power down\n pass",
"def get_adjacent_tiles(self):\n return list(set(self.corner_right.get_tiles()) & set(self.corner_left.get_tiles()))",
"def collapse(self):\n\n while not self.has_collapsed:\n min_i, min_j = self.get_lowest_entropy()\n\n if not self.canvas[min_i][min_j].has_collapsed:\n self.canvas[min_i][min_j].collapse()\n\n # Continue until there are no more affected tiles\n affected = self.get_neighbors(min_i, min_j)\n\n total_updated = 0\n\n while len(affected) > 0:\n new_affected = []\n\n # Go through all currently affected tiles\n start_time = time.time()\n for i, j in affected:\n # print('wowieee!')\n if not self.canvas[i][j].has_collapsed:\n neighbors = self.get_neighbors(i, j)\n neighbor_tiles = [\n self.canvas[u][v].states\n for u,v in neighbors\n ]\n\n # Calculate the new states of (i, j) based on its neighbors\n new_states = self.matcher.match(self.canvas[i][j].states, neighbor_tiles)\n\n # If the new states are different to the current ones,\n # update the states for (i, j) and add neighbors to affected\n current_states = self.canvas[i][j].states\n \n if tuple(current_states) != new_states:\n # print(new_states)\n self.canvas[i][j].update_states(new_states)\n \n new_affected += [\n pos for pos in set(neighbors).difference(set(affected))\n if pos not in new_affected and pos not in affected\n ]\n\n total_updated += 1\n print(time.time()-start_time)\n\n # if not new_affected:\n # for i, j in np.ndindex((self.height, self.width)):\n # if not self.canvas[i][j].has_collapsed:\n # neighbors = self.get_neighbors(i, j)\n # neighbor_tiles = [\n # self.canvas[u][v].states\n # for u,v in neighbors\n # ]\n\n # # Calculate the new states of (i, j) based on its neighbors\n # new_states = self.matcher.match(self.canvas[i][j].states, neighbor_tiles)\n\n # # If the new states are different to the current ones,\n # # update the states for (i, j) and add neighbors to affected\n # current_states = self.canvas[i][j].states\n \n # if tuple(current_states) != new_states:\n\n # print('ayyy')\n # self.canvas[i][j].update_states(new_states)\n \n # new_affected += [\n # pos for pos in set(neighbors).difference(set(affected))\n # if pos not in new_affected and pos not in affected\n # ]\n\n # total_updated += 1\n\n affected = new_affected\n # print(str(self))\n \n\n print(f'{int(self.count_collapsed()/(self.width*self.height)*100)}% done')\n print('total updated: ',total_updated)\n # print(str(self).replace('!', \"[red]![/red]\"), '\\n')\n \n # Return False if there are any erroneous tiles\n # if str(self).count(self.errchar):\n # return False\n # print(str(self))\n\n return True",
"def explore_adjacent_tiles(self, tiles):\n center_x, center_y = (self.rect.left + 1)/32, (self.rect.top + 1)/32\n width, height = len(tiles[0]), len(tiles) \n x1, y1 = center_x - 2, center_y - 2\n x2, y2 = center_x + 2, center_y + 3\n for y in xrange( y1, y2 ):\n if( 0 <= y < height ):\n for x in xrange( x1, x2 ):\n if( 0 <= x < width ):\n tiles[y][x].map()",
"def get_end_tiles(self):\n return list(set(self.corner_right.get_tiles()) ^ set(self.corner_left.get_tiles()))",
"def check_if_all_tiles_cleared(self):\r\n all_cleared = True\r\n for tile in self.tiles.values():\r\n if not tile.is_mine:\r\n if tile.is_hidden:\r\n all_cleared = False\r\n break\r\n return all_cleared",
"def findDeadRegions(self): \n color_groups = [self.rootstate.get_black_groups(),\n self.rootstate.get_white_groups()]\n for groups in color_groups:\n for key in groups.keys():\n group = groups[key]\n if len(group) < 2:\n continue\n color = self.rootstate.get_color(group[0])\n nb = set()\n \n for cell in group:\n for neighbor in self.rootstate.neighbors(cell):\n if self.rootstate.get_color(neighbor) == Gamestate.PLAYERS[\"none\"]:\n nb.add(neighbor)\n \n checkSide1 = True\n checkSide2 = True\n if color == Gamestate.PLAYERS[\"black\"]:\n for cell in group:\n if cell[1] == 0:\n checkSide1 = False\n elif cell[1] == self.rootstate.size - 1:\n checkSide2 = False\n elif color == Gamestate.PLAYERS[\"white\"]:\n for cell in group:\n if cell[0] == 0:\n checkSide1 = False\n elif cell[0] == self.rootstate.size - 1:\n checkSide2 = False\n \n self.dead = self.dead | self.findEdgeUnreachable(color, nb, checkSide1,\n checkSide2)",
"def flag_obvious_cells(self):\n # If the game is over, do nothing.\n if self.game_over:\n return\n\n # Flag the appropriate cells and removes the appropriate cell (not the cell flagged)\n # off the list of active cells.\n for cell in self.list_active_cells():\n if self.neighboring_bombs(cell.row, cell.column) == \\\n self.neighboring_flags(cell.row, cell.column) + self.neighboring_uncovered(cell.row, cell.column):\n for row_offset, column_offset in product((0, -1, 1), (0, -1, 1)):\n try:\n current_cell = self.cells[cell.row + row_offset][cell.column + column_offset]\n if not (row_offset == 0 and column_offset == 0) and \\\n cell.row + row_offset >= 0 and cell.column + column_offset >= 0 and \\\n current_cell.state == \"covered\":\n current_cell.right_click()\n except IndexError:\n pass\n self.remove_active_cell(cell)\n self.updated = True",
"def isTileCleaned(self, m, n):\n if (self.tiles[(int(m), int(n))] == 1):\n return True\n return False",
"def uncover_neighbors(self, row, column):\n super().uncover_neighbors(row, column)\n\n for row_offset, column_offset in product((-1, 0, 1), (-1, 0, 1)):\n try:\n if self.cells[row + row_offset][column + column_offset].state == \"uncovered\" and \\\n row + row_offset >= 0 and column + column_offset >= 0 and \\\n self.neighboring_bombs(row + row_offset, column + column_offset) - \\\n self.neighboring_flags(row + row_offset, column + column_offset) >= 0 and \\\n self.neighboring_uncovered(row + row_offset, column + column_offset) > 0 and \\\n not self.cells[row + row_offset][column + column_offset] in self.list_active_cells():\n self.insert_active_cell(self.cells[row + row_offset][column + column_offset])\n except (TypeError, IndexError):\n pass",
"def apply_simple_heuristics(self):\r\n\r\n # Loops through all the cells one by one\r\n for y in range(self.board_y_size):\r\n for x in range(self.board_x_size):\r\n\r\n # If the cell is blank\r\n if self.board[y, x] == 0:\r\n\r\n # Checks if the two cells to the right are the same colour; if they are, this must be different\r\n if x < self.board_x_size - 2:\r\n if self.board[y, x + 1] == self.board[y, x + 2] != 0:\r\n self.board[y, x] = -self.board[y, x + 1]\r\n\r\n # Checks if the two cells to the left are the same colour; if they are, this must be different\r\n if x > 1:\r\n if self.board[y, x - 2] == self.board[y, x - 1] != 0:\r\n self.board[y, x] = -self.board[y, x - 1]\r\n\r\n # Checks if the two cells below are the same colour; if they are, this must be different\r\n if y < self.board_y_size - 2:\r\n if self.board[y + 1, x] == self.board[y + 2, x] != 0:\r\n self.board[y, x] = -self.board[y + 1, x]\r\n\r\n # Checks if the two cells above are the same colour; if they are, this must be different\r\n if y > 1:\r\n if self.board[y - 1, x] == self.board[y - 2, x] != 0:\r\n self.board[y, x] = -self.board[y - 1, x]\r\n\r\n # Checks if the adjacent cells row-wise are the same colour; if they are, this must be different\r\n if 0 < x < self.board_x_size - 1:\r\n if self.board[y, x - 1] == self.board[y, x + 1] != 0:\r\n self.board[y, x] = -self.board[y, x + 1]\r\n\r\n # Checks if the adjacent cells column-wise are the same colour; if they are, this must be different\r\n if 0 < y < self.board_y_size - 1:\r\n if self.board[y - 1, x] == self.board[y + 1, x] != 0:\r\n self.board[y, x] = -self.board[y + 1, x]",
"def reduce_puzzle(grid):\n stuck = False\n while not stuck:\n\n begin = len([i for i in boxes if len(grid[i]) == 1])\n\n grid = eliminate(grid)\n grid = only_choice(grid)\n grid = naked_twins(grid)\n #grid = naked_triple(grid) - something for the future\n end = len([i for i in boxes if len(grid[i]) == 1])\n stuck = begin == end\n\n sanity = len([i for i in boxes if len(grid[i]) == 0])\n if sanity > 0:\n return\n\n return grid",
"def find_unvisited_neighbours(self, cell_row, cell_col):#, wall_cell=False):\n valid_neighbours = self.grid[cell_row][cell_col].valid_neighbours()\n for vn in valid_neighbours:\n vn_cell = self.grid[vn[0]][vn[1]]\n if not vn_cell.visited:\n valid_neighbours.remove(vn)\n # if wall_cell:\n # if vn_cell.value == 1:\n # # what if it is already removed?\n # valid_neighbours.remove(vn)\n return valid_neighbours",
"def reveal_all_clear_cells_from(self, x, y) :\n\n assert x in range(0, self.__width), \"x must be between 0 and the game width\"\n assert y in range(0, self.__height), \"y must be between 0 and the game height\"\n\n cell = self.get_cell(x, y)\n\n if cell.is_bomb() :\n cell.reveal()\n self.set_state(GameState.losing)\n else :\n if not cell.is_revealed() :\n cell.reveal()\n self.__nbCellsUnrevealed -= 1\n if cell.number_of_bombs_in_neighborhood() == 0:\n for (u, v) in neighborhood(x, y, self.__width, self.__height) :\n self.reveal_all_clear_cells_from(u, v)\n\n if self.__nbCellsUnrevealed == self.__nbBombs :\n self.set_state(GameState.winning)",
"def clear_lines(grid):\n count=0\n for i in range(20):\n full=True\n for j in range(10):\n if(grid[i][j] is None): \n full=False\n break\n if(full):\n count+=1\n for j in range(10):\n grid[i][j]=None\n i=19\n j=18\n while(i>0 and j>=0):\n null=True\n for k in range(10):\n if(grid[i][k] is not None):\n null=False\n break\n if(null):\n j=min(i-1,j)\n while(j>=0 and null):\n null=True\n for k in range(10):\n if(grid[j][k] is not None):\n null=False\n break\n if(null): j-=1\n if(j<0): break\n for k in range(10):\n grid[i][k]=grid[j][k]\n grid[j][k]=None\n if(grid[i][k] is not None): grid[i][k].y=tetris.HALF_WIDTH+i*tetris.FULL_WIDTH\n j-=1\n i-=1\n \n if (count > 0):\n return True\n else:\n return False",
"def check_lost (grid):\r\n height=4\r\n #check for 0 value in grid \r\n for row in range(height):\r\n for col in range(height):\r\n if 0 in grid[row]:\r\n return False\r\n #check for equal adjacent values horizontally \r\n for row in range(height):\r\n for col in range(height-1): \r\n if grid[row][col] == grid[row][col+1]:\r\n return False\r\n \r\n #check for equal adjacent values vertically \r\n for row in range(height-1):\r\n for col in range(height): \r\n if grid[row][col] == grid[row+1][col]:\r\n return False \r\n else:\r\n return True",
"def isTileCleaned(self, m, n):\n if (m,n) in self.cleanTile:\n return True\n else:\n return False",
"def has_unknown_neighbors_of_8(self, x, y):\n if self.has_unknown_neighbors_of_4(x, y):\n return True\n else:\n notAvailibleSpaces = []\n \n if(x!=0 and y!=0) and (self.is_cell_not_walkable(x-1,y-1)):\n notAvailibleSpaces.append((x-1,y-1))\n\n if(x!=self.map.info.width-1 and y!=self.map.info.height-1) and (self.is_cell_not_walkable(x+1,y+1)):\n notAvailibleSpaces.append((x+1,y+1))\n\n if(x!=self.map.info.width-1 and y!=0) and (self.is_cell_not_walkable(x+1,y-1)):\n notAvailibleSpaces.append((x+1,y-1))\n\n if(x!=0 and y!=self.map.info.height-1) and (self.is_cell_not_walkable(x-1,y+1)):\n notAvailibleSpaces.append((x-1,y+1))\n\n return len(notAvailibleSpaces) is not 0",
"def remove_dead_ends(self) -> None:\n done = False\n while not done:\n done = True\n for y in range(1, self.current_map_width):\n for x in range(1, self.current_map_height):\n if not self.current_map[x][y].block_path:\n exits = 0\n for direction in DungeonGenerator.directions:\n dx, dy = direction\n if not self.current_map[x + dx][y + dy].block_path:\n exits += 1\n\n if exits == 1:\n done = False\n self.current_map[x][y].block_path = True\n self.current_map[x][y].texture = self.wall_texture"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make an example for training and testing. Outputs a tuple (label, features) where label is +1 if capital letters are the majority, and -1 otherwise; and features is a list of letters.
|
def get_example():
features = random.sample(string.ascii_letters, NUM_SAMPLES)
num_capitalized = len([ letter for letter in features if letter in string.ascii_uppercase ])
num_lowercase = len([ letter for letter in features if letter in string.ascii_lowercase ])
if num_capitalized > num_lowercase:
label = 1
else:
label = -1
return (label, features)
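A possible driver for the function above (hypothetical; it assumes the module imports `random` and `string` and defines the `NUM_SAMPLES` constant, where an odd value avoids ties, which the function would otherwise label -1):

import random
import string

NUM_SAMPLES = 9  # assumed module-level constant; odd to rule out ties

if __name__ == "__main__":
    random.seed(0)
    for _ in range(3):
        label, features = get_example()
        print(label, "".join(features))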
|
[
"def make_example(sequence, label):\n ex = tf.train.SequenceExample()\n # Context: sequence length and label\n ex.context.feature[LEN_FEAT_NAME].int64_list.value.append(len(sequence))\n ex.context.feature[LABEL_FEAT_NAME].float_list.value.append(label)\n\n # Feature lists: words\n fl_tokens = ex.feature_lists.feature_list[WORDS_FEAT_NAME]\n for word in sequence:\n fl_tokens.feature.add().int64_list.value.append(word)\n\n return ex",
"def make_example(features):\n\n def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n def _float32_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n def _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\n feature_fns = {\n 'int64': _int64_feature,\n 'float32': _float32_feature,\n 'bytes': _bytes_feature\n }\n\n feature_dict = dict((key, feature_fns[feature_type](np.ravel(value)))\n for key, feature_type, value in features)\n\n # Create an example protocol buffer.\n example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n example_serial = example.SerializeToString()\n return example_serial",
"def train_test() -> Tuple[TextClassificationDataset, TextClassificationDataset, int]:\n train_examples, test_examples = datasets.IMDB.splits(\n text_field=data.Field(lower=False, sequential=False),\n label_field=data.Field(sequential=False, is_target=True)\n )\n\n def dataset(examples: data.dataset.Dataset) -> TextClassificationDataset:\n return TextClassificationDataset(\n texts=[example.text for example in examples],\n labels=[float(example.label == 'pos') for example in examples]\n )\n\n return dataset(train_examples), dataset(test_examples), 2",
"def label_names(boys_names, girls_names, func):\n seed(2) \n labeled_names = [(name, 'male') for name in boys_names] + \\\n [(name, 'female') for name in girls_names]\n\n featuresets = [(func(x), g) for (x, g) in labeled_names]\n shuffle(featuresets) \n train_set = featuresets[:-len(featuresets)/3]\n test_set = featuresets[-len(featuresets)/3:]\n return test_set, train_set",
"def run_bagofwords(args):\n train_fnames, train_labels, train_tokens = read_data(args.training_file)\n test_fnames, test_labels, test_tokens = read_data(args.test_file)\n\n le = preprocessing.LabelEncoder().fit(train_labels)\n\n clf = get_classifier(train_labels, train_tokens, args.num_features)\n\n predictions = predict(clf, test_tokens, num=args.num_features)\n\n if args.slice:\n tokens = test_tokens[args.slice].split()\n slices = [tokens[x:x + 100] for x in range(0, len(tokens), 100)]\n slices = [' '.join(slices[i]) for i in range(len(slices))]\n predict_file = predict_single(clf, slices, num=args.num_features)\n print(test_fnames[args.slice])\n for i in range(len(predict_file)):\n print(predict_file[i], \":\")\n print(slices[i])\n elif args.metrics:\n predictions = predict_single(clf, test_tokens, num=args.num_features)\n show_metrics(test_labels, predictions)\n else:\n for i in range(len(predictions)):\n print(test_fnames[i],end=' ')\n for j in range(len(predictions[i])):\n print(le.classes_[j] + \": \" + str(predictions[i][j]),end = ' ')\n print()",
"def train(self, features):",
"def learn(self, training_example, label):\r\n for i, feature in enumerate(training_example):\r\n self.features[i].update(feature, label)\r\n self.total[CLASS_INDICES[label.strip()]] += 1",
"def create_feature_names(self):\n if self.features is None:\n num_features = self.X.shape[1]\n self.features = [\"Feature_{:03d}\".format(i) for i in range(num_features)]",
"def _create_dev_examples(self, lines, set_type):\n print(\"creating training examples\")\n examples = []\n for (i, line) in enumerate(lines):\n guid = line[0]\n text_a = line[7]\n if line[6] == 1:\n label = '1'\n print(\"test found hate\")\n else:\n label = '0'\n print(\"test no hate\")\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples",
"def create_one_hot_labels(self, train_label, test_label): # by3ml two columns wa7ed lel zeroes we wa7ed lel ones\n \n maxlen = int(max(train_label.max(), test_label.max()))\n \n train = np.zeros((train_label.shape[0], train_label.shape[1], maxlen + 1))\n test = np.zeros((test_label.shape[0], test_label.shape[1], maxlen + 1))\n \n for i in range(train_label.shape[0]):\n for j in range(train_label.shape[1]):\n train[i, j, train_label[i, j]] = 1\n \n for i in range(test_label.shape[0]):\n for j in range(test_label.shape[1]):\n test[i, j, test_label[i, j]] = 1\n \n return train, test",
"def convert_single_example(self, example: WikiHopExample) -> tf.train.Example:\n\n features = self.convert_single_example_to_features(example=example)\n # Long features\n features_dict = collections.OrderedDict()\n features_dict[\"long_token_ids\"] = create_int_feature(\n features.input_features.long_token_ids)\n features_dict[\"long_token_type_ids\"] = create_int_feature(\n features.input_features.long_token_type_ids)\n features_dict[\"long_sentence_ids\"] = create_int_feature(\n features.input_features.long_sentence_ids)\n features_dict[\"long_paragraph_ids\"] = create_int_feature(\n features.input_features.long_paragraph_ids)\n features_dict[\"long_paragraph_breakpoints\"] = create_int_feature(\n features.input_features.long_paragraph_breakpoints)\n features_dict[\"l2g_linked_ids\"] = create_int_feature(\n features.l2g_linked_ids)\n\n # Global features\n features_dict[\"global_paragraph_breakpoints\"] = create_int_feature(\n features.input_features.global_paragraph_breakpoints)\n\n features_dict[\"global_token_ids\"] = create_int_feature(\n features.input_features.global_token_ids)\n\n features_dict[\"global_token_type_ids\"] = create_int_feature(\n features.input_features.global_token_type_ids)\n\n # Other features\n features_dict[\"is_real_example\"] = create_int_feature(\n [int(features.input_features.is_real_example)])\n\n features_dict[\"label_id\"] = create_float_feature(\n [float(example.ground_truth_answer_index)])\n\n # Debug features\n if not isinstance(example, PaddingInputExample):\n features_dict[\"example_id\"] = create_string_feature([example.example_id])\n features_dict[\"query\"] = create_string_feature([example.query])\n features_dict[\"candidates\"] = create_string_feature(\n example.candidate_answers)\n features_dict[\"ground_truth_answer\"] = create_string_feature(\n [example.ground_truth_answer])\n return tf.train.Example(features=tf.train.Features(feature=features_dict))",
"def predict(self, test_example):\r\n\r\n probs = self.features[0].get_probs(test_example[0])\r\n for i, feature in enumerate(test_example):\r\n probs *= self.features[i].get_probs(feature)\r\n total_examples = sum(self.total)\r\n probs *= self.total\r\n return CLASS_LABELS[np.argmax(probs)]",
"def test_extract_labeled_features(self):\n test_data = _get_test_data()\n featuresets, labels = pipelines.extract_labeled_features(\n data=test_data,\n attributes=['description'],\n )\n\n self.assertTrue(any(featuresets))\n self.assertTrue(any(labels))",
"def convert_single_example_to_features(\n self, example: WikiHopExample) -> WikiHopInputFeatures:\n\n if isinstance(example, PaddingInputExample):\n return WikiHopInputFeatures(\n input_features=InputFeatures(\n long_token_ids=[0] * self.long_seq_len,\n long_token_type_ids=[0] * self.long_seq_len,\n long_sentence_ids=[-1] * self.long_seq_len,\n long_paragraph_ids=[-1] * self.long_seq_len,\n long_paragraph_breakpoints=[0] * self.long_seq_len,\n global_paragraph_breakpoints=[0] * self.global_seq_len,\n global_token_ids=[0] * self.global_seq_len,\n global_token_type_ids=[0] * self.global_seq_len,\n is_real_example=False),\n l2g_linked_ids=[-1] * self.long_seq_len)\n\n # We do the following three steps here.\n # 1) Truncate to ensure that the total number of sentences across all the\n # docs is no more than `max_num_sentences`.\n #\n # 2) Truncate tokens further to ensure that the total number of WordPieces\n # across all of the docs is no more than `long_seq_len`.\n #\n # 3) Convert WikiHopExample to InputFeatures. The ETC features\n # are structured as follows:\n #\n # Global Input Structure:\n # [1 token per candidate][1 token per query\n # WordPiece][1 doc level token + 1 token per sentence in the doc]\n # [1 doc level token + 1 token per sentence of another doc]......[Padding]\n #\n # Long Input Structure:\n # [Candidate WordPieces][Query WordPieces][Doc tokens][Another doc tokens]...\n # [Padding]\n #\n # self.long_sentence_ids assignment:\n # 1) every candidate would be assigned a different sentence_id\n # 2) every query \"WordPiece\" would be assigned a different sentence_id\n # 3) every sentence in every doc would be assigned a different sentence_id\n # 4) sentence_ids are padded using -1s (and not 0s as is the case generally)\n #\n # self.long_paragraph_breakpoints:\n # 1) at the end of every candidate\n # 2) at the end of the query\n # 3) at the end of every doc\n #\n # self.long_paragraph_ids assignment:\n # 1) global input has a doc level token for every doc\n # 2) the goal of these ids is to match the doc tokens in the long input\n # to the corresponding global level tokens\n # 3) every candidate / query token gets a -1\n # 4) all tokens of a doc gets the same paragraph_id (the ids should be such\n # that global token at index i should map to doc with paragraph_id = i)\n # 5) paragraph_ids are padded using -1s (and not 0s as is the case generally)\n\n docs = example.docs\n num_query_tokens = len(self.tokenizer.tokenize(example.query))\n num_candidate_tokens = 0\n # Count candidate tokens\n candidates = example.candidate_answers\n for candidate in candidates:\n num_candidate_tokens += len(self.tokenizer.tokenize(candidate))\n\n max_allowed_doc_tokens = (\n self.long_seq_len - num_query_tokens - num_candidate_tokens)\n\n # List[List[]] to store list of sentences per doc.\n sentences_per_doc = truncate_doc_sentences(\n docs=docs,\n tokenizer=self.tokenizer,\n max_num_sentences=self.max_num_sentences,\n max_num_tokens=max_allowed_doc_tokens)\n\n begin_sentence_id = 0\n next_sentence_id = self._add_candidate_tokens(\n example=example, begin_sentence_id=begin_sentence_id)\n next_sentence_id = self._add_query_tokens(\n example=example, begin_sentence_id=next_sentence_id)\n self._add_doc_tokens(\n example=example,\n sentences_per_doc=sentences_per_doc,\n begin_sentence_id=next_sentence_id,\n max_allowed_doc_tokens=max_allowed_doc_tokens)\n self._link_long_global_tokens(example=example)\n\n self._pad_features()\n\n return WikiHopInputFeatures(\n input_features=InputFeatures(\n 
long_token_ids=self.long_token_ids,\n long_token_type_ids=self.long_token_type_ids,\n long_sentence_ids=self.long_sentence_ids,\n long_paragraph_ids=self.long_paragraph_ids,\n long_paragraph_breakpoints=self.long_paragraph_breakpoints,\n global_paragraph_breakpoints=self.global_paragraph_breakpoints,\n global_token_ids=self.global_token_ids,\n global_token_type_ids=self.global_token_type_ids),\n l2g_linked_ids=self.l2g_linked_ids)",
"def create_sets():\n train_labeled = []\n test_labeled = []\n train_lines, test_lines = read_files()\n word = []\n for line in train_lines:\n data, label, next_id = split_sample(line)\n if next_id == '-1':\n word.append((data, label))\n train_labeled.append(word)\n word = []\n else:\n word.append((data, label))\n word = []\n for line in test_lines:\n data, label, next_id = split_sample(line)\n if next_id == '-1':\n word.append((data, label))\n test_labeled.append(word)\n word = []\n else:\n word.append((data, label))\n\n return train_labeled, test_labeled",
"def create_training_example(background,activates,negatives):\n background=background-20\n #initialize y(label vector) of zeros\n y=np.zeros((1,Ty))\n #initialize segment times as empty list\n previous_segment=[]\n # Select 0-4 random \"activate\" audio clips from the entire list of \"activates\" recordings\n number_of_activates=np.random.randint(0,5)\n random_indices=np.random.randint(len(activates),size=number_of_activates)\n random_activates=[activates[i] for i in random_indices]\n \n for random_activate in random_activates:\n background,segment_time=insert_audio_clip(background,random_activate,previous_segments)\n segment_start,segment_end=segment_time\n y=insert_ones(y,segment_end)\n \n number_of_negatives=np.random.randint(0,3)\n random_indices=np.random.randint(len(negatives),size=number_of_negatives)\n random_negatives=[negatives[i] for i in random_indices]\n \n for random_negative in random_negatives:\n back_ground,_=insert_audio_clip(background,random_negative,previous_segments)\n \n # Standardize the volume of the audio clip \n background=match_target_amplitude(background,-20.0)\n file_handle=background.export(\"train\"+\".wav\",format=\"wav\")\n print(\"File (train.wav) was saved in your directory.\")\n x=graph_spectrogram(\"train.wav\")\n return x,y",
"def create_features(text):\n text = classifier.remove_url(text)\n text = classifier.remove_stopwords(text)\n\n sentiment_arr = csr_matrix(classifier.find_sentiment([text]))\n\n tfid_pkl = open('tfid.pkl', 'rb')\n tfidf = pickle.load(tfid_pkl)\n\n tfs_arr = tfidf.transform([text])\n\n pos_pkl = open('pos.pkl', 'rb')\n vec = pickle.load(pos_pkl)\n\n pos_arr = vec.transform(classifier.get_pos_features([text]))\n\n topic_pkl = open('topic.pkl', 'rb')\n lda = pickle.load(topic_pkl)\n\n topic_arr = lda.transform(tfs_arr)\n\n features = hstack([sentiment_arr, tfs_arr, pos_arr, topic_arr])\n\n tfid_pkl.close()\n pos_pkl.close()\n topic_pkl.close()\n\n return features",
"def predict_and_label(model, testing):\n # Give me the prediction and their corresponding label\n return testing.map(lambda p: (model.predict(p.features), p.label))",
"def classify_testing_dataset(forests, t_features):\r\n result = []\r\n for row in t_features:\r\n classified_labels = []\r\n for tree in forests:\r\n classified_labels.append(dt.classify_testing_record(tree, row))\r\n\r\n result.append(dt.majority_voting(classified_labels))\r\n return result",
"def generate_facial_features(facial_features, is_male):\n\n sentence = \"He\" if is_male else \"She\"\n sentence += \" has\"\n\n def nose_and_mouth(attribute):\n \"\"\"\n Returns a grammatically correct sentence based on the attribute\n \"\"\"\n\n if attribute == \"big nose\" or attribute == \"pointy nose\":\n return \"a \" + attribute\n elif attribute == \"mouth slightly open\":\n return \"a slightly open mouth\"\n return attribute\n\n if len(facial_features) == 1:\n attribute = nose_and_mouth(\" \".join(facial_features[0].lower().split(\"_\")))\n return sentence + \" \" + attribute + \".\"\n\n for i, attribute in enumerate(facial_features):\n attribute = nose_and_mouth(\" \".join(attribute.lower().split(\"_\")))\n\n if i == len(facial_features) - 1:\n sentence = sentence[:-1]\n sentence += \" and \" + attribute + \".\"\n else:\n sentence += \" \" + attribute + \",\"\n\n return sentence"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resample dataset so that the result contains the same number of lines per category in categ_column.
|
def rebalance_by_categorical(dataset: pd.DataFrame, categ_column: str, max_lines_by_categ: int = None,
seed: int = 1) -> pd.DataFrame:
categs = dataset[categ_column].value_counts().to_dict()
max_lines_by_categ = max_lines_by_categ if max_lines_by_categ else min(categs.values())
return pd.concat([(dataset
.loc[dataset[categ_column] == categ, :]
.sample(max_lines_by_categ, random_state=seed))
for categ in list(categs.keys())])
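A usage sketch with a toy frame (the data below is made up, not from the original source); the smallest class has 2 rows, so every class is downsampled to 2:

import pandas as pd

if __name__ == "__main__":
    df = pd.DataFrame({
        "label": ["a"] * 5 + ["b"] * 3 + ["c"] * 2,
        "value": range(10),
    })
    balanced = rebalance_by_categorical(df, "label")
    print(balanced["label"].value_counts())  # a, b and c each appear twice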
|
[
"def preprocess(df, freq, horiz):\n\n # resample demand to `freq`\n if \"category\" not in df:\n df.rename({\"family\": \"category\"}, axis=1, inplace=True)\n\n df.loc[:,\"timestamp\"] = pd.DatetimeIndex(df[\"timestamp\"])\n\n df2 = resample(df, freq, horiz)\n\n df2[\"timestamp\"] = pd.DatetimeIndex(df2[\"timestamp\"]) \\\n .strftime(\"%Y-%m-%d\")\n\n # add category-level sums\n df_cat_sums = df2.groupby([\"category\", \"timestamp\"]) \\\n .agg({\"demand\": sum}) \\\n .reset_index() \\\n .rename({\"demand\": \"category_sum\"}, axis=1)\n\n df2 = df2.merge(df_cat_sums, on=[\"timestamp\", \"category\"], how=\"left\")\n\n# df2[\"timestamp\"] = pd.DatetimeIndex(df2[\"timestamp\"])\n\n df2.sort_values(by=[\"channel\", \"category\", \"item_id\", \"timestamp\"],\n inplace=True)\n\n# df2.set_index(\"timestamp\", inplace=True)\n\n # DEBUG\n# _dd = \\\n# df2.groupby([\"channel\", \"category\", \"item_id\"]) \\\n# .agg({\"timestamp\": \"count\"}) \\\n# .rename({\"timestamp\": \"count\"}, axis=1)\n#\n# assert(_dd[\"count\"].min() >= horiz)\n # DEBUG\n\n return df2",
"def resample(X_train, y_train, minority_factor=2):\n y_counts = np.bincount(y_train)\n minority_class = np.argsort(-y_counts)[1] # second biggest class\n minority_size = y_counts[minority_class]\n ixs = []\n for label in np.unique(y_train):\n ixs_for_label = np.nonzero(y_train == label)[0]\n ixs.extend(\n list(np.random.choice(\n ixs_for_label,\n min(len(ixs_for_label), minority_size * minority_factor),\n replace=False\n ))\n )\n return X_train[ixs, :], y_train[ixs]",
"def resample(x):\n\t\n\ty = []\n\tr = np.random.random_integers(0, len(x) - 1, len(x))\n\ty = np.take(x, r)\n\ty.sort()\n\treturn y",
"def resample(self):\n for unit in self.model:\n if isinstance(unit, Unit):\n unit.sample()\n else:\n for u in unit:\n u.sample()",
"def resampler(size, df):\r\n return df.resample(size).mean()",
"def summarize_data(df, max_categories=10): \n try:\n import sidetable\n except:\n # %pip install sidetable\n # !pip install sidetable\n import sidetable\n \n \n df_list=[]\n capt_list=[]\n \n for col in list(df.columns):\n df_freq = df.stb.freq([col])\n if len(df_freq) > max_categories:\n col_name = col + '_Repetitions'\n df_freq.rename(columns={'count':col_name}, inplace=True)\n # display(df_freq.stb.freq([col_name], style=True))\n capt_list.append(col_name)\n df_list.append(df_freq.stb.freq([col_name]))\n else:\n # display(df.stb.freq([col], style=True))\n capt_list.append(col)\n df_list.append(df_freq)\n return df_list, capt_list",
"def squeeze(dataset, how: str = 'day'):\n return dataset.groupby(by = lambda ts: timestamp_floor(ts, how = how))",
"def resample(path, upscale_factor=2):\n with rasterio.open(path) as dataset:\n\n # resample data to target shape\n data = dataset.read(out_shape=(dataset.count,\n int(dataset.height * upscale_factor),\n int(dataset.width * upscale_factor)),\n resampling=Resampling.mode)\n\n # scale image transform\n transform = dataset.transform * dataset.transform.scale(\n (dataset.width / data.shape[-1]), (dataset.height / data.shape[-2]))\n\n #write new dataset\n metadata = dataset.meta.copy()\n\n metadata.update({\n 'transform': transform,\n \"height\": dataset.height * upscale_factor,\n \"width\": dataset.width * upscale_factor\n })\n\n basename = os.path.splitext(path)[0]\n filename = \"{}_resampled.tif\".format(basename)\n\n with rasterio.open(filename, \"w\", **metadata) as dest:\n dest.write(data)\n\n return filename",
"def resample(self, data, length: int, boxcar=True):\n old_length = len(data)\n new_length = length\n if old_length == new_length:\n return data\n if new_length == 0 or old_length == 0:\n return np.array([])\n\n if new_length > old_length:\n # Upsample\n return self._upsample(data, new_length)\n else:\n # Downsample\n if old_length % new_length: \n # Requires upsampling to nearest multiple first, then reducing\n data = self._upsample(data, int(np.ceil(old_length / new_length) * new_length))\n old_length = len(data)\n return self._downsample(data, int(old_length / new_length), boxcar=boxcar)",
"def df_resample(dataframe, time_bin):\n vol_df = dataframe.copy()\n vol_df['volume'] = 1\n vol_df = vol_df.resample(time_bin).sum()\n vol_df.dropna(inplace=True)\n\n dataframe = dataframe.resample(time_bin).mean()\n dataframe.dropna(inplace=True)\n\n return dataframe.join(vol_df['volume'])",
"def resample(\n self,\n sampling_rate=None,\n variables=None,\n force_dense=False,\n in_place=False,\n kind=\"linear\",\n ):\n return self._densify_and_resample(\n sampling_rate,\n variables,\n force_dense=force_dense,\n in_place=in_place,\n kind=kind,\n resample_dense=True,\n )",
"def resample_for_update(self):\n for freq in self.df_klines.df_freqs:\n start = time.time()\n df_freq = getattr(self.df_klines, 'df_' + freq)\n df_freq = df_freq.drop(df_freq.tail(1).index)\n t_latest = df_freq.tail(1)['end_t'].values.item()\n df_new = self.ws_hist.loc[self.ws_hist['start_t'] > t_latest]\n if freq in self.df_klines.df_freqs[1:]:\n df_new = resample(df_new, freq)\n\n df_new = self.indicator(df_freq.loc[:, msg_dict.keys()].tail(110).append(df_new), full_df=False, d1=False, d2=False, smooth_periods=[5])\n df_freq = df_freq.append(df_new)\n setattr(self.df_klines, 'df_' + freq, df_freq)\n printv('Resample freq {} time: {}'.format(freq, time.time()-start), self.verbose)",
"def build_reduced_grouped_dataframe(self):\n if self.reduced_grouped_dataframe is None:\n reduced_dataframe = self.get_reduced_dataframe()\n self.reduced_grouped_dataframe = reduced_dataframe.groupby(CASE_CONCEPT_NAME)",
"def resampleTotalNutrition(df, how = \"D\"):\n\n outputDF = df.resample(how).sum()\n return outputDF",
"def resample(self, n):\n if n==len(self.times):\n return\n self.times = np.linspace(self.times[0], self.times[-1], n)",
"def resample(self, sample_docs_id_pre):\n #sample_docs_id = sample_docs_id_pre[:int(self.num_doc/2)]\n #remain_index = list(set(range(len(self.t_a_k))) - set(sample_docs_id))\n #remain_index_sample = random.sample(remain_index, int(self.num_doc/2))\n #sample_docs_id += remain_index_sample\n self.get_samples(sample_docs_id_pre[:self.num_doc])",
"def resample_ts (data, index, mode = \"mean\"):\n\n resampled_ts = []\n rows_ts = []\n j = 0\n\n for i in range (len (data)):\n \tif j >= len (index):\n \t break\n\n \tif (data[i][0] > index [j]):\n \t\tif len (rows_ts) == 0:\n \t\t\tresampled_ts. append ([index [j]] + [0 for i in range (len (data [0][1:]) )])\n \t\telse:\n \t\t\tresampled_ts. append ([index [j]] + regroupe_data (rows_ts, mode))\n \t\tinitializer = 0\n \t\tj += 1\n \t\trows_ts = []\n\n \trows_ts. append (data [i][1:])\n\n if len (rows_ts) > 0 and j < len (index):\n resampled_ts. append ([index [j]] + regroupe_data (rows_ts, mode))\n\n return np. array (resampled_ts)",
"def resample_batch(X,Y,n):\n sample_range = random.sample(range(len(X)),n)\n return [float(X[i]) for i in sample_range], [float(Y[i]) for i in sample_range]",
"def compress_dataframe_time_interval(processed_df, interval):\n resampled_df = processed_df.resample('{}min'.format(interval), on='Time').mean()\n return resampled_df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Makes two brackets, one from (x11, y1) to (x12, y1) and the second from (x21, y2) to (x22, y2), and connects them with a line (with given arrow style)
|
def make_range_connection_bracket(x11: float, x12: float, x21: float, x22: float, y1: float, y2: float, arrow_style: m_patches.ArrowStyle, color: str,
opacity: float = 1., linewidth: float = 1.):
middle_1 = (x11 + x21) / 2
middle_2 = (x12 + x22) / 2
y11 = y1 + 1
y21 = y2 + 1
p1 = m_patches.FancyArrowPatch(path=m_Path([(middle_1, y1), (middle_1, y11)],
[m_Path.MOVETO, m_Path.LINETO]),
fc="none", lw=linewidth, color=color, alpha=opacity,
arrowstyle=m_patches.ArrowStyle.BracketA(widthA=middle_1,
lengthA=3,
angleA=None))
p2 = make_connection(middle_1, y11, middle_2, y21, color, opacity, linewidth, arrow_style)
p3 = m_patches.FancyArrowPatch(path=m_Path([(middle_2, y2), (middle_2, y21)],
[m_Path.MOVETO, m_Path.LINETO]),
fc="none", lw=linewidth, color=color, alpha=opacity,
arrowstyle=m_patches.ArrowStyle.BracketA(widthA=middle_2,
lengthA=3,
angleA=None))
return [p1, p2, p3]
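A usage sketch under explicit assumptions: the aliases `m_patches` and `m_Path` are taken to be `matplotlib.patches` and `matplotlib.path.Path`, and `make_connection` below is a hypothetical stand-in for the helper the original module defines elsewhere:

import matplotlib.patches as m_patches
import matplotlib.pyplot as plt
from matplotlib.path import Path as m_Path

def make_connection(x1, y1, x2, y2, color, opacity, linewidth, arrow_style):
    # Hypothetical stand-in: a straight arrow between the two bracket midpoints.
    return m_patches.FancyArrowPatch((x1, y1), (x2, y2), arrowstyle=arrow_style,
                                     color=color, alpha=opacity, lw=linewidth)

if __name__ == "__main__":
    fig, ax = plt.subplots()
    for patch in make_range_connection_bracket(1, 3, 5, 7, y1=0, y2=4,
                                               arrow_style=m_patches.ArrowStyle("->"),
                                               color="tab:blue"):
        ax.add_patch(patch)
    ax.set_xlim(0, 8)
    ax.set_ylim(-1, 6)
    plt.show()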
|
[
"def arrow(x0,x1,y0,y1,col=\"Black\"):\n ob = lines([(x0,y0),(x1,y1)],col)\n ob[\"arrows\"] = quoteString(\"to\")\n return ob",
"def _draw_arrow(self, x1, y1, x2, y2, tag=None):\n line = self.canvas.create_line((x1, y1), (x2, y2))\n poly = self.canvas.create_polygon((x2-13, y2+5),\\\n (x2, y2),\\\n (x2-13, y2-5))\n\n if y2 != y1:\n # negative because the coords of window are oriented the other way\n turn = -math.atan((y2-y1)/(x2-x1))\n else:\n turn = 0\n\n x0, y0 = self.canvas.coords(poly)[:2]\n x1, y1 = self.canvas.coords(poly)[2:4]\n x2, y2 = self.canvas.coords(poly)[4:]\n # rotating about the arrowhead\n cx, cy = x1, y1\n\n if turn != 0:\n # translate to center\n x0 = x0 - cx\n y0 = y0 - cy\n # rotate\n _x0 = x0 * math.cos(turn) + y0 * math.sin(turn)\n _y0 = -x0 * math.sin(turn) + y0 * math.cos(turn)\n\n # translate to center\n x2 = x2 - cx\n y2 = y2 - cy\n # rotate\n _x2 = x2 * math.cos(turn) + y2 * math.sin(turn)\n _y2 = -x2 * math.sin(turn) + y2 * math.cos(turn)\n\n # set the coordinates to the rotated ones\n self.canvas.coords(poly, [_x0 + cx, _y0 + cy, x1, y1, _x2 + cx, _y2 + cy])\n\n if tag:\n self.canvas.itemconfig(line, tag=tag)\n self.canvas.itemconfig(poly, tag=tag)\n\n return (line, poly)",
"def add_arrow_coord(self, line, arrow_height, arrow_width, recess):\r\n # arrow = SvgPolygon(_maxlen=4)\r\n if line.type == 'polyline':\r\n xe = line.coordsX[-1]\r\n ye = line.coordsY[-1]\r\n xp = line.coordsX[-2]\r\n yp = line.coordsY[-2]\r\n else:\r\n xe = line.attributes['x2']\r\n ye = line.attributes['y2']\r\n xp = line.attributes['x1']\r\n yp = line.attributes['y1']\r\n h = arrow_height\r\n if arrow_width == 0:\r\n w = arrow_height / 3\r\n else:\r\n w = arrow_width\r\n r = recess\r\n self.add_coord(xe, ye)\r\n dx = xe - xp\r\n dy = ye - yp\r\n de = math.sqrt(dx**2 + dy**2)\r\n xh = xe - h * dx / de\r\n yh = ye - h * dy / de\r\n x1 = xh + w * dy / de\r\n y1 = yh - w * dx / de\r\n self.add_coord(x1, y1)\r\n x2 = xe - (h - r) * dx / de\r\n y2 = ye - (h - r) * dy / de\r\n self.add_coord(x2, y2)\r\n x3 = xh - w * dy / de\r\n y3 = yh + w * dx / de\r\n self.add_coord(x3, y3)",
"def _path(self, dc, coord1, coord2, drawstyle):\n if drawstyle == 'line':\n # Straight line between points.\n line = [coord1, coord2]\n elif drawstyle == 'steps-pre':\n # Up/down to next Y, then right to next X\n intermediate = [coord1[0], coord2[1]]\n line = [coord1, intermediate, coord2]\n elif drawstyle == 'steps-post':\n # Right to next X, then up/down to Y\n intermediate = [coord2[0], coord1[1]]\n line = [coord1, intermediate, coord2]\n elif drawstyle == 'steps-mid-x':\n # need 3 lines between points: right -> up/down -> right\n mid_x = ((coord2[0] - coord1[0]) / 2) + coord1[0]\n intermediate1 = [mid_x, coord1[1]]\n intermediate2 = [mid_x, coord2[1]]\n line = [coord1, intermediate1, intermediate2, coord2]\n elif drawstyle == 'steps-mid-y':\n # need 3 lines between points: up/down -> right -> up/down\n mid_y = ((coord2[1] - coord1[1]) / 2) + coord1[1]\n intermediate1 = [coord1[0], mid_y]\n intermediate2 = [coord2[0], mid_y]\n line = [coord1, intermediate1, intermediate2, coord2]\n else:\n err_txt = \"Invalid drawstyle '{}'. Must be one of {}.\"\n raise ValueError(err_txt.format(drawstyle, self._drawstyles))\n\n line = [(int(p[0]), int(p[1])) for p in line]\n dc.DrawLines(line)",
"def linje(x1, y1, x2, y2): \n pu() # pen up - rita inte\n goto(x1, y1) # flytta markören\n pd() # pen down - rita \n goto(x2, y2) # flytta markören så att en linje ritas",
"def line_style_1():\n\n return(\"+ - - - - + - - - - +\")",
"def draw_straight_arrow(self, start_point, end_point):\n\n self.draw_line_segment(start_point, end_point)\n self.draw_arrow_tip(end_point, self._compute_line_angle(start_point, end_point))",
"def drawLine(tortle, x_start, y_start, x_end, y_end):\n tortle.up()\n tortle.goto(x_start, y_start)\n tortle.down()\n tortle.goto(x_end, y_end)",
"def line_style_2():\n\n return(\"| | |\")",
"def draw_arrow_tip(self, tip, arrow_direction_angle):\n\n tip_position = self.project_point_to_canvas(tip)\n\n size = self.arrow_width_svgpx\n opening_angle = self.arrow_opening_angle\n curvature = self.arrow_curvature\n\n tan_opening = math.tan(opening_angle)\n semi_height = size * tan_opening\n inner_coordinate = size * (1 - curvature)\n top_point = [ size, semi_height ]\n bottom_point = [ size, - semi_height ]\n middle_point = [ inner_coordinate, 0 ]\n control_height = 0.5 * inner_coordinate * tan_opening\n control_vector_top = [ inner_coordinate, control_height ]\n control_vector_bottom = [ inner_coordinate, - control_height ]\n tip_point = [ 0, 0 ]\n path_command = self._make_svg_path_M_command([ top_point, tip_point, bottom_point ] )\n path_command += ' ' + self._make_svg_path_C_command([ middle_point, top_point ],\n [ bottom_point, control_vector_bottom, control_vector_top, top_point ])\n path_command += ' ' + self._make_svg_path_Z_command()\n path = self.svgwrite_object.path(d = path_command,\n style = self._make_svg_style_string(fill_color = self.stroke_color, dash_mode = \"none\"))\n # be wary of that featured bug that reverse the order of the transformations\n path.translate(tip_position)\n path.rotate(math.degrees(math.pi - arrow_direction_angle), center = [ 0, 0 ]) # also: angles are negative\n #\n self.svgwrite_object.add(path)",
"def makeArrowPath(headLen=20, headWidth=None, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):\n if headWidth is None:\n headWidth = headLen * math.tan(math.radians(tipAngle * 0.5))\n path = QtGui.QPainterPath()\n path.moveTo(0,0)\n path.lineTo(headLen, -headWidth)\n if tailLen is None:\n innerY = headLen - headWidth * math.tan(math.radians(baseAngle))\n path.lineTo(innerY, 0)\n else:\n tailWidth *= 0.5\n innerY = headLen - (headWidth-tailWidth) * math.tan(math.radians(baseAngle))\n path.lineTo(innerY, -tailWidth)\n path.lineTo(headLen + tailLen, -tailWidth)\n path.lineTo(headLen + tailLen, tailWidth)\n path.lineTo(innerY, tailWidth)\n path.lineTo(headLen, headWidth)\n path.lineTo(0,0)\n return path",
"def svgLine(start, end):\n\tsvgTag('line x1=\"%d\" y1=\"%d\" x2=\"%d\" y2=\"%d\"' % (start+end) )",
"def add_arrow(route_map, pt1, pt2, **extra):\n if pt1[0] == pt2[0]:\n # For debug routes from the location to itself.\n return\n if pt1[0] - pt2[0] > MAP_WIDTH / 2:\n two_parts_correction = 1\n elif pt2[0] - pt1[0] > MAP_WIDTH / 2:\n two_parts_correction = -1\n else:\n two_parts_correction = 0\n pt1 = (pt1[0] - two_parts_correction * MAP_WIDTH, pt1[1])\n\n v = (pt1[0] - pt2[0], pt1[1] - pt2[1])\n norm = MAP_ARROW_OFFSET / hypot(*v)\n pt1 = (pt1[0] - norm * v[0], pt1[1] - norm * v[1])\n pt2 = (pt2[0] + norm * v[0], pt2[1] + norm * v[1])\n\n norm = MAP_ARROWHEAD_LENGTH / hypot(*v)\n points = [\n pt1, (\n pt2[0] + norm * v[0] + MAP_ARROWHEAD_TAN * norm * v[1],\n pt2[1] + norm * v[1] - MAP_ARROWHEAD_TAN * norm * v[0],\n ), (\n pt2[0] + norm * v[0] - MAP_ARROWHEAD_TAN * norm * v[1],\n pt2[1] + norm * v[1] + MAP_ARROWHEAD_TAN * norm * v[0],\n ),\n ]\n if two_parts_correction:\n route_map.add(route_map.line(\n (pt1[0] + two_parts_correction * MAP_WIDTH, pt1[1]),\n (pt2[0] + two_parts_correction * MAP_WIDTH, pt2[1]),\n **extra\n ))\n for pt in points:\n route_map.add(route_map.line(pt, pt2, **extra))",
"def draw_arrowhead(canvas, color, head, nx, ny, length):\r\n ax = length * (-ny - nx)\r\n ay = length * (nx - ny)\r\n points = \\\r\n [\r\n head[0] + ax, head[1] + ay,\r\n head[0], head[1],\r\n head[0] - ay, head[1] + ax\r\n ]\r\n canvas.create_polygon(points, fill=color)",
"def triangle(x1: float, y1: float, x2: float, y2: float, x3: float, y3: float) -> None:\n path = skia.Path().moveTo(x1, y1).lineTo(x2, y2).lineTo(x3, y3).close()\n __canvas.drawPath(path, __fill_paint())\n __canvas.drawPath(path, __stroke_paint())",
"def rounded_edges_or_pointed_ends_rectangle(ctx, x, y, w, h,\n arrow_right=False,\n arrow_left=False, r=5):\n ctx.move_to(x+r, y) # Move to A\n ctx.line_to(x+w-r, y) # Straight line to B\n if arrow_right:\n ctx.line_to(x+w, y+h/2) # Straight line to K\n ctx.line_to(x+w-r, y+h) # Straight line to E\n else:\n ctx.curve_to(x+w, y, x+w, y, x+w, y+r) # Curve to C: 2 ctrl pts at Q\n ctx.line_to(x+w, y+h-r) # Move to D\n ctx.curve_to(x+w, y+h, x+w, y+h, x+w-r, y+h) # Curve to E\n ctx.line_to(x+r, y+h) # Line to F\n if arrow_left:\n ctx.line_to(x, y+h/2) # Straight line to J\n ctx.line_to(x+r, y) # Straight line to A\n else:\n ctx.curve_to(x, y+h, x, y+h, x, y+h-r) # Curve to G\n ctx.line_to(x, y+r) # Line to H\n ctx.curve_to(x, y, x, y, x+r, y) # Curve to A",
"def DrawArrow(self, dc, u, v):\n from math import pi, atan, cos, sin\n pi_6 = pi/6\n points = []\n x1, y1 = u\n x2, y2 = v\n a = x2 - x1\n b = y2 - y1\n if abs(a) < 0.01: # vertical segment\n if b > 0:\n alpha = -pi/2\n else:\n alpha = pi/2\n else:\n if a==0:\n alpha = pi/2 # TODO ?\n else:\n alpha = atan(b/a)\n if a > 0:\n alpha += pi\n alpha1 = alpha + pi_6\n alpha2 = alpha - pi_6\n size = self._arrowSize\n points.append((x2 + size * cos(alpha1), y2 + size * sin(alpha1)))\n points.append((x2, y2))\n points.append((x2 + size * cos(alpha2), y2 + size * sin(alpha2)))\n dc.DrawPolygon(points)",
"def create_triangle(length, upper_left_x, upper_left_y):\n\tline_1 = GLine(upper_left_x, upper_left_y, upper_left_x + length, upper_left_y)\n\tline_2 = GLine(upper_left_x + length, upper_left_y, length * 0.5 + upper_left_x, upper_left_y + length * 0.866)\n\tline_3 = GLine(upper_left_x, upper_left_y, length * 0.5 + upper_left_x, upper_left_y + length * 0.866)\n\twindow.add(line_1)\n\twindow.add(line_2)\n\twindow.add(line_3)",
"def create_line(self, start_point=None, end_point=None):\n selected_points = (start_point, end_point) \n if None in selected_points:\n selected_points = self.get_selected_points()\n if len(selected_points) > 1:\n line = GraphicsLib.GraphicsItemLine(selected_points[0],\n selected_points[1])\n self.add_shape(line)\n return line\n else:\n msg = \"Please select two points (with same kappa and phi) \" + \\\n \"to create a helical line\"\n logging.getLogger(\"GUI\").error(msg)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
this tests that, no matter what the input is, the result matches the dns1123 validation regex
|
def test_clean_name_dns1123(self, input_value, func, max_size):
result = func(input_value)
# this is a regex used by k8s to validate the right name for dns1123
assert re.match(r"(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?", result)
assert len(result) <= max_size
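Because the pattern above is fully optional, `re.match` succeeds on any string and the first assertion never fails; a stricter variant (a sketch of the assumed intent, not the original test) checks the whole string with `re.fullmatch` against the RFC 1123 label pattern:

import re

DNS1123_LABEL_RE = re.compile(r"[a-z0-9]([-a-z0-9]*[a-z0-9])?")

def is_dns1123_label(name: str, max_size: int = 63) -> bool:
    # Valid only if the entire string matches and it fits the length limit.
    return len(name) <= max_size and DNS1123_LABEL_RE.fullmatch(name) is not None

if __name__ == "__main__":
    assert is_dns1123_label("my-app-01")
    assert not is_dns1123_label("My_App")         # uppercase and underscore rejected
    assert not is_dns1123_label("-leading-dash")  # must start with an alphanumeric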
|
[
"def test_ip_adress(result):\n\n assert re.match(r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.)'\n r'{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$',\n result.json()['query']), \\\n \"The value of a 'query' field is not correct IP address.\"",
"def test_sanitize_job_name(self, job_name):\n\n def valid(x):\n return k.DNS_1123_RE.match(x) is not None\n\n sanitized = util.sanitize_job_name(job_name)\n\n if valid(job_name):\n self.assertEqual(job_name, sanitized)\n else:\n self.assertTrue(valid(sanitized))\n\n # idempotency check\n self.assertEqual(sanitized, util.sanitize_job_name(sanitized))\n\n return",
"def test_ipaddress_getasn(dummy_asndb):\n ipaddr = IPAddress('1.1.1.1', 'Wed Dec 31 16:25:00 2013')\n assert ipaddr.get_asn(dummy_asndb) == dummy_asndb.lookup('1.1.1.1')[0]",
"def test_internal_address_filter_wildcard(self):\n af1 = AddressFilter(\"i-?\")\n assert af1.match(\"i1\")\n assert af1.match(\"i 2\")\n assert af1.match(\"i-3\")\n assert af1.match(InternalGroupAddress(\"i-4\"))\n assert not af1.match(\"1\")\n assert not af1.match(GroupAddress(1))\n assert not af1.match(\"i11\")\n assert not af1.match(\"i 11\")\n assert not af1.match(\"i-11\")\n assert not af1.match(InternalGroupAddress(\"i-11\"))\n\n af2 = AddressFilter(\"i-t?st\")\n assert af2.match(\"it1st\")\n assert af2.match(\"i t2st\")\n assert af2.match(\"i-test\")\n assert af2.match(InternalGroupAddress(\"i-test\"))\n assert not af2.match(\"1\")\n assert not af2.match(GroupAddress(1))\n assert not af2.match(\"i11\")\n assert not af2.match(\"i tst\")\n assert not af2.match(\"i-teest\")\n assert not af2.match(InternalGroupAddress(\"i-tst\"))\n\n af3 = AddressFilter(\"i-*\")\n assert af3.match(\"i1\")\n assert af3.match(\"i asdf\")\n assert af3.match(\"i-3sdf\")\n assert af3.match(InternalGroupAddress(\"i-4\"))\n assert not af3.match(\"1\")\n assert not af3.match(GroupAddress(1))\n assert af3.match(\"i11\")\n assert af3.match(\"i 11??\")\n assert af3.match(\"i-11*\")\n assert af3.match(InternalGroupAddress(\"i-11\"))\n\n af4 = AddressFilter(\"i-t*t\")\n assert af4.match(\"it1t\")\n assert af4.match(\"i t22t\")\n assert af4.match(\"i-testt\")\n assert af4.match(\"i-tt\")\n assert af4.match(InternalGroupAddress(\"i-t333t\"))\n assert not af4.match(\"1\")\n assert not af4.match(GroupAddress(1))\n assert not af4.match(\"i testx\")\n assert not af4.match(\"i-11test\")\n assert not af4.match(InternalGroupAddress(\"i-11\"))",
"def test_ip_address_ipv4_dotnetmask(self):\n data = r'192.168.0.1/255.255.128.0'\n expected = json.loads(r'''{\"version\":4,\"max_prefix_length\":32,\"ip\":\"192.168.0.1\",\"ip_compressed\":\"192.168.0.1\",\"ip_exploded\":\"192.168.0.1\",\"ip_split\":[\"192\",\"168\",\"0\",\"1\"],\"scope_id\":null,\"ipv4_mapped\":null,\"six_to_four\":null,\"teredo_client\":null,\"teredo_server\":null,\"dns_ptr\":\"1.0.168.192.in-addr.arpa\",\"network\":\"192.168.0.0\",\"broadcast\":\"192.168.127.255\",\"hostmask\":\"0.0.127.255\",\"netmask\":\"255.255.128.0\",\"cidr_netmask\":17,\"hosts\":32766,\"first_host\":\"192.168.0.1\",\"last_host\":\"192.168.127.254\",\"is_multicast\":false,\"is_private\":true,\"is_global\":false,\"is_link_local\":false,\"is_loopback\":false,\"is_reserved\":false,\"is_unspecified\":false,\"int\":{\"ip\":3232235521,\"network\":3232235520,\"broadcast\":3232268287,\"first_host\":3232235521,\"last_host\":3232268286},\"hex\":{\"ip\":\"c0:a8:00:01\",\"network\":\"c0:a8:00:00\",\"broadcast\":\"c0:a8:7f:ff\",\"hostmask\":\"00:00:7f:ff\",\"netmask\":\"ff:ff:80:00\",\"first_host\":\"c0:a8:00:01\",\"last_host\":\"c0:a8:7f:fe\"},\"bin\":{\"ip\":\"11000000101010000000000000000001\",\"network\":\"11000000101010000000000000000000\",\"broadcast\":\"11000000101010000111111111111111\",\"hostmask\":\"00000000000000000111111111111111\",\"netmask\":\"11111111111111111000000000000000\",\"first_host\":\"11000000101010000000000000000001\",\"last_host\":\"11000000101010000111111111111110\"}}''')\n self.assertEqual(jc.parsers.ip_address.parse(data, quiet=True), expected)",
"def test_domain():\n assert tst.domain(99001203) == 5",
"def testIPAddressPatternMatchesValidIPAddresses(self):\n pattern = regex.REGEXES_AND_ERRORS_DICTIONARY['ipAddressPattern']\n # IP v6 addresses taken from example of wikipedia at this address:\n # https://en.wikipedia.org/wiki/IPv6_address\n # Feel free to add more as necessary.\n potentialIps = [\n '127.0.0.0',\n '2001:0db8:85a3:0000:0000:8a2e:0370:7334',\n '2001:db8:85a3:0:0:8a2e:370:7334',\n '0:0:0:0:0:0:0:1',\n '0:0:0:0:0:0:0:0',\n '::1',\n '::',\n ]\n self._patternMatchHelper(pattern, potentialIps)",
"def test_staking_validators_validator_addr_get(self):\n pass",
"def test_create_domain_name_query(self):\n encoded_dns_message: BytesIO = DNSMessageUtilities.create_domain_name_query('www.cs.ubc.ca', 12345, 15)\n\n encoded_dns_message.seek(0) # need to start reading the dns message from the start of the BytesIO object\n dns_message = DNSMessage.decode_dns_message(encoded_dns_message)\n\n # Check that the query id is correct\n self.assertEqual(dns_message.query_id, 12345)\n\n # Check that flags are correct\n self.assertEqual(dns_message.is_response, False)\n self.assertEqual(dns_message.opcode, 0)\n self.assertEqual(dns_message.authoritative, False)\n self.assertEqual(dns_message.is_truncated, False)\n self.assertEqual(dns_message.recursion_desired, False)\n self.assertEqual(dns_message.recursion_available, False)\n self.assertEqual(dns_message.rcode, 0)\n\n # Check that record/question counts are correct\n self.assertEqual(dns_message.question_count, 1)\n self.assertEqual(dns_message.answer_count, 0)\n self.assertEqual(dns_message.nameserver_count, 0)\n self.assertEqual(dns_message.additional_count, 0)\n\n # Check that the question was properly decoded\n self.assertEqual(dns_message.dns_questions[0].name, 'www.cs.ubc.ca')\n self.assertEqual(dns_message.dns_questions[0].type, 15)\n self.assertEqual(dns_message.dns_questions[0].response_class, 1)",
"def test_valid_ipv4(self):\n self.assertEqual(is_valid_ip_address(\"192.168.0.55\"), True)",
"def validate_dns_name(data, max_len=db_constants.FQDN_FIELD_SIZE):\n msg = _validate_dns_format(data, max_len)\n if msg:\n return msg\n\n request_dns_name = _get_request_dns_name(data)\n if request_dns_name:\n dns_domain = _get_dns_domain_config()\n msg = _validate_dns_name_with_dns_domain(request_dns_name, dns_domain)\n if msg:\n return msg",
"def dns_labels():\n # This is too limited, but whatever\n return s.from_regex(u'\\\\A[a-z]{3}[a-z0-9-]{0,21}[a-z]\\\\Z')",
"def test_dns_alias_str(self):\n record = DnsRecord('com', 'example.com', '1.2.3.4')\n alias = DnsRecord('com', 'www.example.com', record)\n self.assertEqual(str(alias), \"www.example IN CNAME example\")",
"def validate_ip_addresses(value: str) -> str:\n if len(value) > 10:\n return \"have length less than or equal to 10\"\n return \"\"",
"def test_internal_address_filter_exact(self):\n af1 = AddressFilter(\"i-1\")\n assert af1.match(\"i1\")\n assert af1.match(\"i 1\")\n assert af1.match(\"i-1\")\n assert af1.match(InternalGroupAddress(\"i-1\"))\n assert not af1.match(\"1\")\n assert not af1.match(GroupAddress(1))",
"def test_has_patient_id_regex(self):\n patient_id_groups = re.match(self.spellgen.patient_id_regex,\n 'nhc_demo_patient_666')\n self.assertEqual(len(patient_id_groups.groups()), 1,\n 'Incorrect regex groups')\n self.assertEqual(patient_id_groups.groups()[0],\n '666',\n 'Incorrect Regex match')",
"def test_is_valid_matrix_server_name(self):\n self.assertTrue(is_valid_matrix_server_name(\"9.9.9.9\"))\n self.assertTrue(is_valid_matrix_server_name(\"9.9.9.9:4242\"))\n self.assertTrue(is_valid_matrix_server_name(\"[::]\"))\n self.assertTrue(is_valid_matrix_server_name(\"[::]:4242\"))\n self.assertTrue(is_valid_matrix_server_name(\"[a:b:c::]:4242\"))\n\n self.assertTrue(is_valid_matrix_server_name(\"example.com\"))\n self.assertTrue(is_valid_matrix_server_name(\"EXAMPLE.COM\"))\n self.assertTrue(is_valid_matrix_server_name(\"ExAmPlE.CoM\"))\n self.assertTrue(is_valid_matrix_server_name(\"example.com:4242\"))\n self.assertTrue(is_valid_matrix_server_name(\"localhost\"))\n self.assertTrue(is_valid_matrix_server_name(\"localhost:9000\"))\n self.assertTrue(is_valid_matrix_server_name(\"a.b.c.d:1234\"))\n\n self.assertFalse(is_valid_matrix_server_name(\"[:::]\"))\n self.assertFalse(is_valid_matrix_server_name(\"a:b:c::\"))\n\n self.assertFalse(is_valid_matrix_server_name(\"example.com:65536\"))\n self.assertFalse(is_valid_matrix_server_name(\"example.com:0\"))\n self.assertFalse(is_valid_matrix_server_name(\"example.com:-1\"))\n self.assertFalse(is_valid_matrix_server_name(\"example.com:a\"))\n self.assertFalse(is_valid_matrix_server_name(\"example.com: \"))\n self.assertFalse(is_valid_matrix_server_name(\"example.com:04242\"))\n self.assertFalse(is_valid_matrix_server_name(\"example.com: 4242\"))\n self.assertFalse(is_valid_matrix_server_name(\"example.com/example.com\"))\n self.assertFalse(is_valid_matrix_server_name(\"example.com#example.com\"))",
"def test_simple(self):\n self.assertEqual(format_addr('info@example.com'), u'info@example.com')",
"def test_ip_address_ipv4_cidr(self):\n data = r'192.168.2.10/24'\n expected = json.loads(r'''{\"version\":4,\"max_prefix_length\":32,\"ip\":\"192.168.2.10\",\"ip_compressed\":\"192.168.2.10\",\"ip_exploded\":\"192.168.2.10\",\"ip_split\":[\"192\",\"168\",\"2\",\"10\"],\"scope_id\":null,\"ipv4_mapped\":null,\"six_to_four\":null,\"teredo_client\":null,\"teredo_server\":null,\"dns_ptr\":\"10.2.168.192.in-addr.arpa\",\"network\":\"192.168.2.0\",\"broadcast\":\"192.168.2.255\",\"hostmask\":\"0.0.0.255\",\"netmask\":\"255.255.255.0\",\"cidr_netmask\":24,\"hosts\":254,\"first_host\":\"192.168.2.1\",\"last_host\":\"192.168.2.254\",\"is_multicast\":false,\"is_private\":true,\"is_global\":false,\"is_link_local\":false,\"is_loopback\":false,\"is_reserved\":false,\"is_unspecified\":false,\"int\":{\"ip\":3232236042,\"network\":3232236032,\"broadcast\":3232236287,\"first_host\":3232236033,\"last_host\":3232236286},\"hex\":{\"ip\":\"c0:a8:02:0a\",\"network\":\"c0:a8:02:00\",\"broadcast\":\"c0:a8:02:ff\",\"hostmask\":\"00:00:00:ff\",\"netmask\":\"ff:ff:ff:00\",\"first_host\":\"c0:a8:02:01\",\"last_host\":\"c0:a8:02:fe\"},\"bin\":{\"ip\":\"11000000101010000000001000001010\",\"network\":\"11000000101010000000001000000000\",\"broadcast\":\"11000000101010000000001011111111\",\"hostmask\":\"00000000000000000000000011111111\",\"netmask\":\"11111111111111111111111100000000\",\"first_host\":\"11000000101010000000001000000001\",\"last_host\":\"11000000101010000000001011111110\"}}''')\n self.assertEqual(jc.parsers.ip_address.parse(data, quiet=True), expected)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override to ensure that the ``choices`` argument is a ``Choices`` object.
|
def __init__(self, choices, *args, **kwargs):
super(NamedExtendedChoiceFormField, self).__init__(*args, **kwargs)
if not isinstance(choices, Choices):
raise ValueError("`choices` must be an instance of `extended_choices.Choices`.")
self.choices = choices
|
[
"def assert_choices(self, choices):\n self.assert_in_help('choices: %s' % choices)",
"def _validate_choices(self, value):\n # Check choices if passed\n if self.choices:\n if value not in self.choices:\n raise ValidationError(\n 'Value {} is restricted by choices: {}'.format(\n value, self.choices))\n return True",
"def __init__(self, choices):\n if type(choices) is dict:\n for choice_type, (choice, subchoices) in choices.items():\n self.append(Choice(choice_type, choice, subchoices))",
"def SetChoices(self, choices):\n if not isinstance(choices, list):\n self._choices = [ x for x in choices]\n else:\n self._choices = choices\n self.dropdownlistbox.setChoices(self._choices)\n #prevent errors on \"old\" systems\n if sys.version.startswith(\"2.3\"):\n self._choices.sort(lambda x, y: cmp(x.lower(), y.lower()))\n else:\n try:\n self._choices.sort(key=lambda x: locale.strxfrm(x).lower())\n except UnicodeEncodeError:\n self._choices.sort(key=lambda x: locale.strxfrm(x.encode(\"UTF-8\")).lower())\n self._setListSize()",
"def choices(self):\n return None",
"def __init__(self, choices, *args, **kwargs):\n self.choices = dict(choices)\n self.revert_choices = dict((v, k) for k, v in self.choices.items())",
"def __init__(self, choices, num_choices):\n self._real_choices = choices\n self._last_contains_check = None\n self._num_choices = num_choices",
"def choices_validator(cls: Type[\"Model\"], values: Dict[str, Any]) -> Dict[str, Any]:\n for field_name, field in cls.Meta.model_fields.items():\n if check_if_field_has_choices(field):\n value = values.get(field_name, ormar.Undefined)\n validate_choices(field=field, value=value)\n return values",
"def build(self, choices):\n for choice in choices:\n self.addItem(choice)",
"def test_creation_good():\n value = \"boo\"\n choices = [\"boo\", \"foo\"]\n choice = param.Choice(value=value, choices=choices)\n assert choice.value == value\n assert choice.choices == choices",
"def check_selection(self, name: str, choices: list) -> Union[None, str]:\n if not hasattr(self, name):\n return \"Parameter '\" + name + \"' not found.\"\n attr = getattr(self, name)\n if type(attr) != str:\n return \"Expected parameter '\" + name + \"' to be of type string.\"\n if attr not in choices:\n match = difflib.get_close_matches(attr, choices, 1)\n if len(match) > 0:\n # noinspection PyTypeChecker\n return \"Value '\" + attr + \"' not allowed for parameter '\" + name + \"'. Did you mean '\" + \\\n match[0] + \"'?\"\n else:\n return \"Value '\" + attr + \"' not allowed for parameter '\" + name + \"'.\"\n return None",
"def test_create_choices_objects_exist(self):\n # set selected model class with existing objects\n self.widget.model_class = TestModel\n # call the _create_choices method\n widgets.ObjectIdSelect._create_choices(self.widget)\n # check whether the list contains an empty choice\n self.assertIn(BLANK_CHOICE_DASH[0], self.widget.choices)\n # create choices\n choice1 = (str(self.object1.pk), self.object1)\n choice2 = (str(self.object2.pk), self.object2)\n # check whether the list contains both TestModel objects\n self.assertIn(choice1, self.widget.choices)\n self.assertIn(choice2, self.widget.choices)\n # check whether there are 3 choices so the list contains nothing\n # but two objects of the TestModel and an empty choice\n self.assertEqual(len(self.widget.choices), 3)",
"def pruneChoices(self, node):\n ch = node['options']\n self.choices = []\n for i in range(0,len(ch)):\n c = ch[i]\n if (c[0] == '' or self.evalStmt(c[0])): #is choice valid?\n flavorText = c[-1]\n self.choices.append([i,flavorText])",
"def get_choices(self, choice_kwargs={}):\n for choice_cls in self:\n yield choice_cls(**choice_kwargs)",
"def test_choice_defined_name(self):\n name = \"What's in a name?\"\n\n class ChoiceObj(smartchoices.Choices):\n MY_CHOICE = smartchoices.Choice(name=name)\n\n # We need the second element of the only tuple\n # from the choices.\n actual_name = ChoiceObj.choices[0][1]\n self.assertEqual(name, actual_name)",
"def test_choices_smart_name(self):\n\n class ChoiceObj(smartchoices.Choices):\n MY_CHOICE = smartchoices.Choice()\n\n class Meta:\n smart_names = True\n\n # We need the second element of the only tuple\n # from the choices.\n actual_name = ChoiceObj.choices[0][1]\n self.assertEqual('My Choice', actual_name)",
"def test_create_choices_objects_do_not_exist(self):\n # set selected model class without existing objects\n self.widget.model_class = AnotherTestModel\n # call the _create_choices method\n widgets.ObjectIdSelect._create_choices(self.widget)\n # check whether the list contains only one choice\n self.assertEqual(len(self.widget.choices), 1)\n # check whether an empty choice presents in the list\n self.assertIn(BLANK_CHOICE_DASH[0], self.widget.choices)",
"def prompt_choice(prompt_text: str, choices: Iterable[str], default: str = ''):\n completer = WordCompleter(choices)\n choices_text = ', '.join(choices)\n return prompt(\n prompt_text,\n hint=choices_text,\n default=default,\n completer=completer,\n validator=is_one_of(choices))",
"def test_choice_name_default(self):\n\n class ChoiceObj(smartchoices.Choices):\n MY_CHOICE = smartchoices.Choice()\n\n # We need the second element of the only tuple\n # from the choices.\n actual_name = ChoiceObj.choices[0][1]\n self.assertEqual('MY_CHOICE', actual_name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
(int) -> int Return the number of happy numbers that are in the range of n >>> count_happy_numbers(20002) 5 >>> count_happy_numbers(120003) 729
|
def count_happy_numbers(n):
all_happy = 0
if len(str(n)) > 4:
for i in range(1, n + 1):
if happy_number(i) is True:
all_happy += 1
return all_happy
|
[
"def happy_numbers(m, n):\n lst_of_happy_in_range = []\n lst_of_happy_in_range = [int(i) for i in range(m, n + 1)\n if happy_number(i) == True]\n return lst_of_happy_in_range",
"def find_happy(N):\n return [i for i in range(N) if is_happy(i)]",
"def uglyNumbers(n):\n\tdef maxDivide(a, b):\n\t\t\"\"\" Divides a by greatest divisible power of b \"\"\"\n\t\twhile a % b == 0:\n\t\t\ta = a / b\n\t\treturn a\n\n\tdef isUgly(no):\n\t\t\"\"\" Check if no is ugly or not - any of these\"\"\"\n\t\tno = maxDivide(no, 2)\n\t\tno = maxDivide(no, 3)\n\t\tno = maxDivide(no, 5)\n\t\treturn 1 if no == 1 else 0\n\n\ti = 1\n\tcount = 1\n\twhile n > count:\n\t\ti += 1\n\t\tif isUgly(i):\n\t\t\tcount += 1\n\treturn i",
"def num_sevens(n):\n if n:\n \tif n % 10 == 7:\n \t\treturn 1 + num_sevens(n // 10)\n \telse:\n \t\treturn num_sevens(n // 10)\n else:\n \treturn 0",
"def num_allowed_dice(score, opponent_score):\n return 1 if ( (opponent_score+score == 7) or (opponent_score+score) % 10 == 7 ) else 10",
"def count_by(x, n):\n\n return list(range(x, x * n + 1, x))",
"def nonbouncy_count(n):\n return int(sum(nonbouncy(i) for i in range(1, n + 1)))",
"def hundreds(n):\n hundreds = utils.split(n)[-3]\n return len(spellingMap[hundreds] + \"hundred\")",
"def fib_first(n):\n counter01 = 1 \n for x in fib_gen():\n if number_of_digit(x) >= n:\n return counter01\n else:\n counter01 += 1",
"def over(n):\r\n count = 0\r\n for r in range(0, n):\r\n ncr = math.factorial(n) / (math.factorial(r) * math.factorial(n - r))\r\n if ncr > 1000000:\r\n count += 1\r\n return count",
"def solution(n: int = 1000) -> int:\n prev_numerator, prev_denominator = 1, 1\n result = []\n for i in range(1, n + 1):\n numerator = prev_numerator + 2 * prev_denominator\n denominator = prev_numerator + prev_denominator\n if len(str(numerator)) > len(str(denominator)):\n result.append(i)\n prev_numerator = numerator\n prev_denominator = denominator\n\n return len(result)",
"def is_happy(num):\n validate_integers(num)\n while True:\n if num == 1:\n return True\n\n if num == 89:\n return False\n\n num = happy_step(num)",
"def puissance(n):\r\n resultat=2\r\n if n==1:\r\n return resultat\r\n elif n>7000:\r\n return(0)\r\n else:\r\n for i in range(n):\r\n resultat=resultat*2\r\n return resultat",
"def ten_pairs(n):\n \"*** YOUR CODE HERE ***\"\n if n < 10:\n return 0\n return ten_pairs(n // 10) + count_digit_rec(n // 10, 10 - n % 10)",
"def happy_step(num):\n validate_integers(num)\n num = list(str(num))\n total = 0\n for digit in num:\n digit = int(digit)**2\n total = total + digit\n return total",
"def trailingZeroes(self, n):\n count = 0\n if n == 0:\n return 0\n maxk = math.floor(math.log(n) / math.log(5.))\n while maxk >= 1:\n maxm = n / math.pow(5, maxk)\n count += math.floor(maxm)\n maxk -= 1\n return int(count)",
"def num_sevens(n):\n if n == 0:\n return 0\n else:\n return num_sevens(n // 10) + (lambda k: 1 if n % 10 == 7 else 0)(n)",
"def starting_dominoes_count(player_count: int) -> int:\n if 2 <= player_count <= 4:\n return 15\n elif 5 <= player_count <= 6:\n return 12\n elif 7 <= player_count <= 8:\n return 10\n else:\n return False",
"def countNumbers(n = 1234567890):\n countN = {}\n for i in str(n):\n if i not in countN.keys():\n countN[i] = 1\n else:\n countN[i] = countN[i] + 1\n countN = pd.DataFrame.from_dict(countN,orient='index',columns=['Count']).sort_index()\n return(countN)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
(int, int) -> list Return all happy numbers from range m to n >>> happy_numbers(100, 20002) [10001, 10010, 10100, 11000, 20002] >>> happy_numbers(20002, 29002) [20002, 20011, 20020, 20101, 20110, 20200, 21001, 21010, 21100, 22000]
|
def happy_numbers(m, n):
lst_of_happy_in_range = []
lst_of_happy_in_range = [int(i) for i in range(m, n + 1)
if happy_number(i) == True]
return lst_of_happy_in_range
|
[
"def generate_integers(m, n):\n return list(range(m,n+1))",
"def find_happy(N):\n return [i for i in range(N) if is_happy(i)]",
"def getNumberList(n):\n\tresult = []\n\ti = 0\n\twhile i < n:\n\t\tresult.append(i)\n\t\ti += 1\n\treturn result",
"def intlist(n):\n list1 = []\n while n:\n list1.append(n % 10)\n n = n//10\n return list1",
"def count_happy_numbers(n):\n all_happy = 0\n if len(str(n)) > 4:\n for i in range(1, n + 1):\n if happy_number(i) is True:\n all_happy += 1\n return all_happy",
"def primegen_range(m, n):\n primes = []\n\n if m < 4:\n if m <= 2:\n primes = [2, 3,]\n elif m == 3:\n primes = [3,]\n m = 5\n\n for num in xrange(m, n+1):\n if is_prime(num) is True:\n primes.append(num)\n\n return primes",
"def seven_boom(end_number):\r\n result = []\r\n for i in range(end_number):\r\n if i % 7 == 0:\r\n result.append('BOOM')\r\n continue\r\n if i % 10 == 7:\r\n result.append(\"BOOM\")\r\n continue\r\n result.append(i)\r\n return result",
"def histogram_list(n, num_list):\r\n histogram_lst = []\r\n for i, range_num in enumerate(range(n)):\r\n count_num = 0\r\n for j, num in enumerate(num_list):\r\n assert isinstance(num, int)\r\n assert num >= 0\r\n assert num < n\r\n if num == range_num:\r\n count_num += 1\r\n histogram_lst.append(count_num)\r\n return histogram_lst",
"def seven_boom(n):\r\n list_boom = [] #numbers will be inserted to this list\r\n for num in range(1, n+1): # I want the range to be from 1 to n, and not\r\n # from 0 to n-1 for example\r\n if '7' in str(num): #checks if the string '7'is in the str 'num'\r\n list_boom.append('boom')\r\n elif (num) % 7 == 0: #checks if num is divisible by 7\r\n list_boom.append('boom')\r\n else: #if num doesnt have 7 in in or if num isnt\r\n #divisible by 7, num itself will be added to\r\n #list_boom\r\n list_boom.append(str(num))\r\n return list_boom",
"def get_abundant_numbers_upto(number):\n return [number for number in range(1, number) if is_abundant(number)]",
"def get_lotto_numbers():\n number_of_lotto_numbers = 3\n lotto_numbers = []\n for i in range(number_of_lotto_numbers):\n while True:\n if i == number_of_lotto_numbers - 1:\n lotto_numbers.append(randrange(1, 26))\n break\n else:\n new_number = randrange(1, 70)\n if new_number not in lotto_numbers:\n lotto_numbers.append(new_number)\n break\n return lotto_numbers",
"def list_of_pairs(num_list, n):\r\n pairs_list = []\r\n for num1 in num_list:\r\n for num2 in num_list:\r\n assert isinstance(num1, int)\r\n assert isinstance(num2, int)\r\n if num1 != num2 and num1 + num2 == n and [num2, num1] not in pairs_list:\r\n pair = [num1, num2]\r\n pairs_list.append(pair)\r\n return pairs_list",
"def pairs(n):\n return [[x, x] for x in range(n+1) if x>0]",
"def pairs(num_list, n):\r\n list1 = []\r\n for i in range(len(num_list)): #any number in num_list\r\n for j in range(i, len(num_list)): # number that follows i\r\n if num_list[i] + num_list[j] == n: #checks if sum adds up to n\r\n list1.append([num_list[i], num_list[j]]) #adds to list1\r\n return list1",
"def split_n_range ( low , high , num ) :\n if high <= low or num < 1 : yield low , low\n elif 1 == num : yield low , high\n elif low < high and high <= num + low : yield low , high\n else : \n \n nn = high - low\n newn = nn // num\n for i in range ( 0 , num - 1 ) :\n nl = i * newn\n nh = nl + newn\n yield low + nl , low + nh \n yield low + num * newn - newn , high",
"def gen_rand_list(x: int, n: int) -> list:\n return [gen_rand_int(x) for _ in range(n)]",
"def task_10_generator_of_simple_numbers() -> Generator[int, None, None]:\n for x in range(2, 200):\n if all(x % i != 0 for i in range(2, x)):\n yield x",
"def hall_numbers(graph, list_assignment, colours):\n return [hall_number(graph, list_assignment, colour) for colour in colours]",
"def fake_nums(intList, step): #8\n from random import randint\n placeToInsertNum = []\n for index in range(0, len(intList), step):\n placeToInsertNum.append(index)\n newIntList = [item for item in intList]\n for index in reversed(placeToInsertNum):\n newIntList.insert(index, randint(1, 100))\n return newIntList"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fetch a sensitive value from different sources.
|
def convert_sensitive(value):
if value is None: # Not found
return None
# Short-circuit in case the value starts with value:// (ie, it is enforced)
if value.startswith('value://'):
return value[8:]
if value.startswith('env://'):
envvar = value[6:]
LOG.debug('Loading value from env var: %s', envvar)
warnings.warn(
"Loading sensitive data from environment variable is not recommended "
"and might be removed in future versions."
" Use secret:// instead",
DeprecationWarning, stacklevel=4
)
envvalue = os.getenv(envvar, None)
if envvalue is None:
raise ValueError(f'Environment variable {envvar} not found')
return envvalue
if value.startswith('file://'):
path = value[7:]
LOG.debug('Loading value from path: %s', path)
statinfo = os.stat(path)
if statinfo.st_mode & stat.S_IRGRP or statinfo.st_mode & stat.S_IROTH:
warnings.warn(
"Loading sensitive data from a file that is group or world readable "
"is not recommended and might be removed in future versions."
" Use secret:// instead",
DeprecationWarning, stacklevel=4
)
return get_from_file(path, mode='rt') # str
if value.startswith('secret://'):
path = value[9:]
LOG.debug('Loading secret from path: %s', path)
return get_from_file(path, mode='rb', remove_after=True) # bytes
# It's the value itself (even if it starts with postgres:// or amqp(s)://)
return value
|
[
"def get_value(self, **kwargs):\n return self.source_from(self, **kwargs)",
"def test_core_get_stored_value_v1(self):\n pass",
"def _retrieve_value(self, data, value):\n logging.info('Getting value for {}'.format(value))\n retrieve_data = []\n m_data = DotMap(data)\n if value.startswith('set('):\n retrieve_data = value[4:-1].split(\",\")\n elif value.startswith('values('): # TODO: nested values e.g. values(values(ids))\n search_value = re.search('{}(.*){}'.format('\\(', '\\)'), value).group(1)\n unique_list = search_value.split('.')\n m_data_cp = m_data.copy()\n for attr in unique_list:\n m_data_cp = getattr(m_data_cp, attr)\n retrieve_data = list(m_data_cp.values())\n elif ':' in value:\n obj_ref = getattr(m_data, value.split(':')[0])\n if obj_ref:\n included = value.split(':')[1]\n included = '/' + included.replace('.', '/')\n ref_data = self.wsClient.get_objects2({'objects': [{'ref': obj_ref,\n 'included': [included]}]})['data'][0]['data']\n m_ref_data = DotMap(ref_data)\n if ref_data:\n if '*' not in included:\n for key in included.split('/')[1:]:\n m_ref_data = getattr(m_ref_data, key)\n else:\n keys = included.split('/')[1:]\n m_ref_data = [x.get(keys[2]) for x in ref_data.get(keys[0])] # TODO: only works for 2 level nested data like '/features/[*]/id'\n\n retrieve_data = list(m_ref_data)\n else:\n unique_list = value.split('.')\n m_data_cp = m_data.copy()\n for attr in unique_list:\n m_data_cp = getattr(m_data_cp, attr)\n retrieve_data = list(m_data_cp)\n\n logging.info('Retrieved value (first 20):\\n{}\\n'.format(retrieve_data[:20]))\n\n return retrieve_data",
"def __call__(self, topContainer):\n\n #print(\"IN ValueGetterBase.__CAll__()\")\n\n assert(not self.__inLookup)\n\n if self.__cachedResult is not self.__NoResult:\n return self.__cachedResult\n\n self.__cachedResult = self.handleGetValue(topContainer)\n\n if isinstance(self.__cachedResult, ValueGetterBase):\n valueGetter = self.__cachedResult\n self.__inLookup = True\n self.__cachedResult = valueGetter(topContainer)\n self.__inLookup = False\n\n # The value should be full resolved at this point\n assert(self.__cachedResult is not self.__NoResult)\n assert(not isinstance(self.__cachedResult, ValueGetterBase))\n\n return self.__cachedResult",
"def fetch(self, table_name, key, value):\n i = unicode(key)+u':'+unicode(value)\n return self.data[table_name][i]",
"async def get_external(self, key, default=_EmptyArg):\n try:\n value = await self.external_backend.get(key, default)\n except Exception:\n logger.warning(f\"Setting {key} not found in external backend\")\n raise SettingNotFound(key)\n else:\n await self.internal_backend.set(key, value)\n return value",
"def fetch(cls, hash, key):\n userhash = UserHash.objects.get(\n expires__gte=timezone.now(),\n hash=hash,\n key=key\n )\n return userhash.user, userhash.value",
"def get_value(scope, dataset, key=None):\n try:\n return success({\"data\": datasets.get_value(scope, dataset, key)})\n except Exception as e:\n return failure(e)",
"def lookup(self):",
"def get_value(key):\n data = cache.get(key)\n if data:\n return pickle.loads(data)",
"def get_value(self, domain, intent, query_text):\n try:\n return self.cached_queries[(domain, intent, query_text)]\n except KeyError:\n return",
"def cache_get(self, key=None, collection: str = None, target_value_name: str = None):\n try:\n return dict(getattr(self, f'{collection}').find_one({\"_id\": key}))[target_value_name]\n except ConnectionFailure:\n n = 0\n result = None\n while not result or n == 5:\n result = dict(getattr(self, f'{collection}').find_one({\"_id\": key}))[target_value_name]\n n += 1\n return result\n except TypeError:\n return None",
"def lookupValue(self, playerKey, valueKey):\n valueMap = self.dataMap[valueKey]\n return self.lookupKey(playerKey, valueMap)",
"def get_single_value(self, header_to_get: str, match_header: str, match_value):\n\t\treturn self._database_api.execute_custom_query_one_result('SELECT ' + header_to_get + ' FROM ' + self.table_name + ' WHERE ' + match_header + ' = ' + self._validify_value(match_value))[0]",
"def resource_data_get(resource, key):\r\n result = resource_data_get_by_key(resource.context,\r\n resource.id,\r\n key)\r\n if result.redact:\r\n return _decrypt(result.value, result.decrypt_method)\r\n return result.value",
"def _get(self):\n try:\n return cache[self]\n except KeyError:\n cache[self] = value = func(self)\n return value",
"def test_get_another_user_key(self):\n assert self.model.get(key=\"sword\", user=self.user2) is None\n assert self.model.get(key=\"sword\", user=self.user)",
"def test_lookupDuplicateByValue(self):\n class TRANSPORT_MESSAGE(Values):\n \"\"\"\n Message types supported by an SSH transport.\n \"\"\"\n KEX_DH_GEX_REQUEST_OLD = ValueConstant(30)\n KEXDH_INIT = ValueConstant(30)\n\n self.assertIs(\n TRANSPORT_MESSAGE.lookupByValue(30),\n TRANSPORT_MESSAGE.KEX_DH_GEX_REQUEST_OLD)",
"def extract_from_cache(self, cache, key):\n try:\n value = cache.read(key)\n except Exception, e:\n value = None\n return value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Try to load `filename` as a configuration file for logging.
|
def _load_log(self, filename):
assert(filename)
_here = Path(__file__).parent
# Try first if it is a default logger
_logger = _here / f'loggers/{filename}.yaml'
if _logger.exists():
with open(_logger, 'r') as stream:
dictConfig(yaml.load(stream, Loader=sf))
return _logger
# Otherwise trying it as a path
_filename = Path(filename)
if not _filename.exists():
raise ValueError(f"The file '{filename}' does not exist")
if _filename.suffix in ('.yaml', '.yml'):
with open(_filename, 'r') as stream:
dictConfig(yaml.load(stream, Loader=sf))
return filename
if _filename.suffix in ('.ini', '.INI'):
fileConfig(filename)
return filename
# Otherwise, fail
raise ValueError(f"Unsupported log format for {filename}")
|
[
"def load_config_file():\n\n try:\n return try_load_config()\n except FileNotFoundError:\n logging.error(\"Config file does not exist\")\n raise",
"def logging_file_config(self, config_file):\n parser = configparser.ConfigParser()\n parser.read([config_file])\n if parser.has_section('loggers'):\n config_file = os.path.abspath(config_file)\n fileConfig(config_file, dict(__file__=config_file,\n here=os.path.dirname(config_file)))",
"def load_config(filename=None):\n try:\n with _config_stream(filename) as handle:\n filename = handle.name\n return deserialize_config(handle.read())\n except (OSError, toml.TomlDecodeError, UnicodeDecodeError) as exc:\n raise ConfigError(\"Error loading configuration from {}\".format(filename)) from exc",
"def read_config_file(filename):\n\n try:\n data = toml.loads(open(filename).read())\n return data\n except (Exception, FileNotFoundError, toml.TomlDecodeError) as ex:\n print(\"Error reading configuration file.\")\n print(ex)\n print()\n exit(1)",
"def setup_log(config_path='logging.yaml'):\n if os.path.exists(config_path):\n with open(config_path, 'rt') as f:\n if config_path.endswith('yaml'):\n logging.config.dictConfig(yaml.load(f))\n elif config_path.endswith('json'):\n logging.config.dictConfig(json.load(f))\n log = logging.getLogger(__name__)\n log.debug('Logger setup from configuration file: {}'.format(config_path))\n else:\n print 'Error retrieving logging file!'",
"def load(file_name):\n cfg_path = join(Config.get_config_folder(), file_name)\n if not exists(cfg_path):\n copy(Config._get_default(file_name), cfg_path)\n Logger.info(\"Config: Loading config from %s\", cfg_path)\n with open(cfg_path) as f:\n return load(f)",
"def read_config(filename):\n with open(filename) as fobj:\n return json.load(fobj)",
"def load_logging(logging_conf):\r\n if not os.path.exists(logging_conf):\r\n raise ValueError(f'cannot load logging config from {logging_conf} (does not exist)')\r\n\r\n ext = os.path.splitext(logging_conf)[1]\r\n if ext != '.json':\r\n raise ValueError(f'expected logging config is json file but got {logging_conf}')\r\n\r\n with open(logging_conf, 'r') as infile:\r\n config = json.load(infile)\r\n logging.config.dictConfig(config)",
"def load_logging_config():\n log_config_path = os.path.join(constants.CORE_CONF_DIR, \"logging.conf\")\n with open(log_config_path, \"r\") as log_config_file:\n log_config = json.load(log_config_file)\n logging.config.dictConfig(log_config)",
"def loadConfig(self):\n logging.debug(\"Trying to load configuration file from {}\".format(self.configFilename))\n if len(self.config.read(self.configFilename)) == 0:\n logging.warning(\"No configuration file in path specified. Creating default configuration file.\")\n self.setDefaultConfig()\n self.saveConfig()",
"def load_cfg(self,filepath):\n config = configparser.ConfigParser()\n config.read([filepath])\n return config",
"def load_json_config(filename, key=None):\n with open(filename, \"r\") as f:\n config = json.load(f)\n return _config_helper(config, key)",
"def _load_config( self, config_file ):\n logging.debug( \"try to load configure file:%s\" % config_file )\n with open( config_file ) as fp:\n if self._is_yaml_file( config_file ):\n return yaml.safe_load( fp )\n else:\n return json.load( fp )\n logging.error( \"fail to load the configure file\" )\n return None",
"def Load(self, filename):\n\t\tconfigdict = {}\n\t\tparser = configparser.ConfigParser()\n\t\tparser.read(filename)\n\t\tfor s in parser.sections():\n\t\t\tconfigdict[s] = {}\n\t\t\tfor o in parser.options(s):\n\t\t\t\tconfigdict[s][o] = parser.get(s, o)\n\t\tself.__mixer.ParseConfigDict(configdict)\n\t\tif self.__gui is not None:\n\t\t\tself.__gui.ParseConfigDict(configdict)",
"def load_config(self, configuration_filename):\n\n # Makes sure that the configuration file exists between tyring to load it\n if os.path.isfile(configuration_filename):\n\n try:\n\n # Creates configuration parser and starts parsing configuration file.\n rabbit_config = configparser.RawConfigParser()\n rabbit_config.read(configuration_filename)\n\n # Extracts credentials for connection\n self.username = rabbit_config.get('Gmail', 'username')\n self.password = rabbit_config.get('Gmail', 'password')\n self.client = rabbit_config.get('Gmail', 'mailhub')\n\n # Successully parsed configuration.\n general_utils.log_message('Mail configuration loaded.')\n\n # Tests if credentials do work or not\n self.test_credentials()\n\n except configparser.NoSectionError as e:\n\n # File does not exist, or section (Gmail) does not exist\n self.error_status = general_utils.log_error(-1, python_message=e,\n error_details='Gmail')\n\n except configparser.NoOptionError as e:\n\n # Option does not exist\n self.error_status = general_utils.log_error(-2, python_message=e)\n\n except ValueError as e:\n\n # Port error\n self.error_status = general_utils.log_error(-3, python_message=e)\n\n else:\n\n # Configuration file did not exist, stop and return fatal error\n self.error_status = general_utils.log_error(-401, error_details=configuration_filename)\n\n #######\n return\n #######",
"def _try_to_load_agent_configuration_file(aea_project_path: Path) -> None:\n try:\n configuration_file_path = Path(aea_project_path, DEFAULT_AEA_CONFIG_FILE)\n with configuration_file_path.open(mode=\"r\", encoding=\"utf-8\") as fp:\n loader = ConfigLoader.from_configuration_type(PackageType.AGENT)\n agent_configuration = loader.load(fp)\n logging.config.dictConfig(agent_configuration.logging_config) # type: ignore\n except FileNotFoundError: # pragma: nocover\n raise Exception(\n \"Agent configuration file '{}' not found in the current directory.\".format(\n DEFAULT_AEA_CONFIG_FILE\n )\n )\n except jsonschema.exceptions.ValidationError: # pragma: nocover\n raise Exception(\n \"Agent configuration file '{}' is invalid. Please check the documentation.\".format(\n DEFAULT_AEA_CONFIG_FILE\n )\n )",
"def _logger_init(config_file_name=LOGGING_CONFIG_FILE_NAME):\n if lock.acquire(blocking=False):\n _defaultConfig()\n\n # If the LOGGER_HOME environment variable is NOT set, it uses defaults\n logConfigPath = env.get(LOGGER_HOME)\n if logConfigPath:\n logConfigPath += '/' + config_file_name\n\n path = Path(logConfigPath)\n if path.exists():\n try:\n yaml.add_constructor('!join', join)\n yaml.add_constructor('!get_logging_home', get_logging_home)\n with open(path) as configFile:\n yamlConfig = yaml.load(configFile.read())\n logging.config.dictConfig(yamlConfig['logging'])\n configFile.close()\n except Exception:\n # We were unable to open or read the yaml file\n # so create a default log config\n # There is nothing to be done, the fall back will be the\n # default config\n print('The logger config file:', config_file_name,\n 'could not be read, using default configuration.')\n else:\n # No logging configuration path could be established.\n # There is nothing to be done, the fall back will be the default\n # config\n print('The logger config file:', config_file_name,\n 'could not be found, using default configuration.')",
"def load_config(configFile):\n global config\n defaults = {\n 'server': 'mlck',\n 'admin': 'Unidentified administrator',\n 'email': 'admin@example.com',\n 'location': 'The Internet',\n 'motd': 'mlck.motd',\n 'loglevel': 'INFO',\n 'listen': '127.0.0.1',\n 'port': 6667,\n 'buffer': 1024,\n 'encoding': 'utf-8',\n 'endpoint': 'http://localhost:3000',\n 'refresh': 5,\n 'timeout': 2.5,\n 'color': '#000000',\n }\n cnf = configparser.ConfigParser()\n if not os.path.isfile(configFile):\n logger.critical('configuration file \"%s\" not found', configFile)\n sys.exit(1)\n try:\n cnf.read(configFile)\n except Exception:\n logger.critical('failed to parse the configuration file')\n sys.exit(1)\n if not cnf.has_section('mlck'):\n logger.critical('configuration is missing the mlck section')\n sys.exit(1)\n try:\n # Strings\n for opt in ('server', 'loglevel', 'listen', 'encoding', \\\n 'endpoint', 'color', 'admin', 'email', 'location', 'motd'):\n config[opt] = cnf.get('mlck', opt, fallback=defaults[opt])\n # Ints\n for opt in ('port', 'buffer'):\n config[opt] = cnf.getint('mlck', opt, fallback=defaults[opt])\n # Floats\n for opt in ('refresh', 'timeout'):\n config[opt] = cnf.getfloat('mlck', opt, fallback=defaults[opt])\n except ValueError:\n logger.critical('failed parsing the configuration file')\n sys.exit(1)\n loglevel = getattr(logging, config['loglevel'].upper())\n if not isinstance(loglevel, int):\n logger.critical('invalid log level configured')\n sys.exit(1)\n logger.setLevel(loglevel)\n if config['endpoint'].endswith('/'):\n config['endpoint'] = config['endpoint'][:-1]",
"def _initialize_log_file(config, option_dict):\n global file_handle\n\n if not OPTION_DESTINATION_FILE in option_dict[KEY_DESTINATION]:\n # we don't need to initialize the file logging (because the user didn't request it.)\n # bail out\n return\n\n # by getting to this point, we know that the user specified \"File\" in the \"Destination\" option.\n\n if option_dict[KEY_FILE] is None or option_dict[KEY_FILE] == \"\":\n raise ConfigException(\"A file must be specified when logging to a file. Check your 'File=' option.\")\n\n try:\n log_filename = config.string_builder.build_string(option_dict[KEY_FILE], option_dict[KEY_FILE_PARAMS])\n file_handle = open(log_filename, \"w\")\n except IOError:\n raise ConfigException(\"Couldn't open file, %s, for writing.\" % option_dict[KEY_FILE])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reverses an index, if needed
|
def _rev(self, idx: int) -> int:
if self.order_reversed:
if idx < 0:
idx = -idx - 1
else:
idx = len(self._view) - idx - 1
if idx < 0:
raise IndexError
return idx
|
[
"def reverse_idx(self):\n self.reverseIdx = {i : word for word, i in self.wordIdx.items()}\n return True",
"def reverse(self, in_place=False):\n pass",
"def reverse_dataframe(df):\n return df[::-1].reset_index(drop=True)",
"def fix_reverse_index(self):\n tree = etree.HTML(self.reverse_index)\n for i, entry in enumerate(tree[0]):\n if entry.get('class') != 'letHead':\n for anchor in entry.xpath('.//a'):\n urlhash = anchor.get('href')\n letter = anchor.text[0].upper()\n # Some words begin with a dash or an equals sign\n if not letter.isalpha() and len(anchor.text) >= 2:\n letter = anchor.text[1].upper()\n try:\n root_url = reverse('vocabulary:index', args=[letter])\n except NoReverseMatch:\n logger.error('No match for vocabulary index, letter %s', letter)\n else:\n anchor.attrib['href'] = root_url + urlhash\n self.reverse_index = etree.tostring(tree, encoding='unicode')",
"def reversing_mutation(genome):\n reverse_point = random.randint(0, len(genome))\n mid_point = reverse_point + (len(genome) - reverse_point) / 2\n\n for allele in range(reverse_point, mid_point):\n temp_allele = genome[allele]\n genome[allele] = genome[2 * mid_point - allele]\n genome[2 * mid_point + allele] = temp_allele\n\n return genome",
"def reverse(self): \n return self.write({'state':'reversed'})",
"def reverse(L):\r\n return L[::-1]",
"def invert(self):\n exprs = self._index_exprs()\n for col in self.columns:\n exprs[col] = self.ref(col).invert()\n return self.copy(op=TransformNode(self, exprs))",
"def decrement(self):\n if self.view_index > 1:\n self.view_index -= 1\n elif self.view_index == 1:\n self.view_index = self.end()",
"def reverse(tensor, axis, name=None):\n return array_ops.reverse(tensor, axis=axis, name=name)",
"def reindexObject(idxs=[]):",
"def reverse(inp, axis=0, ind_range=[-1,-1]):\n\n inp = NP.asarray(inp)\n\n try:\n isinstance(inp, NP.ndarray)\n # type(inp) is numpy.ndarray\n except TypeError: \n print 'Unable to convert to Numpy array data type'\n sys.exit(1) # Abort execution\n\n shp = NP.shape(inp)\n ndim = len(shp)\n \n if ndim > 8:\n print \"Input data with more than 8 dimensions not supported.\"\n print \"Aborted execution in my_operations.reverse()\"\n sys.exit(1)\n\n if (axis < 0) or (axis >= ndim):\n print \"Input data does not contain the axis specified.\"\n print \"Aborted execution in my_operations.reverse()\"\n sys.exit(1) \n\n if (ind_range[0] <= -1):\n ind_range[0] = 0 # set default to starting index\n\n if (ind_range[1] == -1) or (ind_range[1] >= shp[axis]):\n ind_range[1] = shp[axis]-1 # set default to ending index\n\n if shp[axis] == 1:\n return inp\n\n revinds = range(ind_range[1],ind_range[0]-1,-1)\n\n if ndim == 1:\n return inp[revinds]\n elif ndim == 2:\n if axis == 0:\n return inp[revinds,:]\n else:\n return inp[:,revinds]\n elif ndim == 3:\n if axis == 0:\n return inp[revinds,:,:]\n elif axis == 1:\n return inp[:,revinds,:]\n else:\n return inp[:,:,revinds]\n elif ndim == 4:\n if axis == 0:\n return inp[revinds,:,:,:]\n elif axis == 1:\n return inp[:,revinds,:,:]\n elif axis == 2:\n return inp[:,:,revinds,:]\n else:\n return inp[:,:,:,revinds]\n elif ndim == 5:\n if axis == 0:\n return inp[revinds,:,:,:,:]\n elif axis == 1:\n return inp[:,revinds,:,:,:]\n elif axis == 2:\n return inp[:,:,revinds,:,:]\n elif axis == 3:\n return inp[:,:,:,revinds,:]\n else:\n return inp[:,:,:,:,revinds]\n elif ndim == 6:\n if axis == 0:\n return inp[revinds,:,:,:,:,:]\n elif axis == 1:\n return inp[:,revinds,:,:,:,:]\n elif axis == 2:\n return inp[:,:,revinds,:,:,:]\n elif axis == 3:\n return inp[:,:,:,revinds,:,:]\n elif axis == 4:\n return inp[:,:,:,:,revinds,:]\n else:\n return inp[:,:,:,:,:,revinds]\n elif ndim == 7:\n if axis == 0:\n return inp[revinds,:,:,:,:,:,:]\n elif axis == 1:\n return inp[:,revinds,:,:,:,:,:]\n elif axis == 2:\n return inp[:,:,revinds,:,:,:,:]\n elif axis == 3:\n return inp[:,:,:,revinds,:,:,:]\n elif axis == 4:\n return inp[:,:,:,:,revinds,:,:]\n elif axis == 5:\n return inp[:,:,:,:,:,revinds,:]\n else:\n return inp[:,:,:,:,:,:,revinds]\n elif ndim == 8:\n if axis == 0:\n return inp[revinds,:,:,:,:,:,:,:]\n elif axis == 1:\n return inp[:,revinds,:,:,:,:,:,:]\n elif axis == 2:\n return inp[:,:,revinds,:,:,:,:,:]\n elif axis == 3:\n return inp[:,:,:,revinds,:,:,:,:]\n elif axis == 4:\n return inp[:,:,:,:,revinds,:,:,:]\n elif axis == 5:\n return inp[:,:,:,:,:,revinds,:,:]\n elif axis == 6:\n return inp[:,:,:,:,:,:,revinds,:]\n else:\n return inp[:,:,:,:,:,:,:,revinds]",
"def reverse(self):\n self.flips.reverse()\n for e in self.flips:\n self.permute(e, False)\n self.flips = []",
"def save_index_off(self):\r\n\t\tself.save_index = False",
"def flip(index,list1):\n list2=list1[:] #clones a list\n if list2[index]==0:\n list2[index]=1\n else:\n list2[index]=0\n return list2",
"def _reverse(self):\n o = self.copy()\n # Clear ok reversed flag\n o._reversed = not o._reversed\n\n if o.bits == 8:\n # No need for reversing\n return o.copy()\n\n if o.is_top:\n # A TOP is still a TOP after reversing\n si = o.copy()\n return si\n\n else:\n if not o.is_integer:\n # We really don't want to do that... but well, sometimes it just happens...\n logger.warning(\"Reversing a real strided-interval %s is bad\", self)\n\n # Reversing an integer is easy\n rounded_bits = ((o.bits + 7) // 8) * 8\n list_bytes = []\n si = None\n\n for i in range(0, rounded_bits, 8):\n b = o._unrev_extract(min(i + 7, o.bits - 1), i)\n list_bytes.append(b)\n\n for b in list_bytes:\n si = b if si is None else si.concat(b)\n si.uninitialized = self.uninitialized\n si._reversed = o._reversed\n return si",
"def applyReverse(self, rv):\n from .trace import applyReverseTransform\n return applyReverseTransform(self, rv)",
"def reverse(self):\n # Taken from implementation in collections.abc.MutableSequence\n names = self.keys()\n n = len(self)\n for i in range(n // 2):\n self[i], self[n - i - 1] = self[n - i - 1], self[i]\n for i, name in enumerate(reversed(names)):\n self.set_block_name(i, name)",
"def reversedEnumerate(l):\n return zip(range(len(l)-1, -1, -1), l[::-1])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Go to a specified offset. Positive offsets are from the beginning of the view, negative from the end of the view, so that 0 is the first flow, -1 is the last flow.
|
def go(self, offset: int) -> None:
if len(self) == 0:
return
if offset < 0:
offset = len(self) + offset
if offset < 0:
offset = 0
if offset > len(self) - 1:
offset = len(self) - 1
self.focus.flow = self[offset]
|
[
"def seek(self, seek_pos=0.0):\n self.sendmessage('JUMP ' + str(seek_pos) + 's')",
"def jmp(self, offset):\n self.ip += int(offset)",
"def seek(self, offset):\n self._filelike.seek(offset)",
"def jump_to_page(self, page: int) -> None:\n overlap = 1 if self.column >= 2 else 0\n self.canvas.yview_moveto(\n str((page // self.column) / (len(self.pages) // self.column + overlap))\n )",
"def op_jump(self, offset):\n\n old_pc = self._opdecoder.program_counter\n\n # The offset to the jump instruction is known to be a 2-byte\n # signed integer. We need to make it signed before applying\n # the offset.\n if (offset >= (1<<15)):\n offset = - (1<<16) + offset\n log(\"Jump unconditionally to relative offset %d\" % offset)\n\n # Apparently reading the 2 bytes of operand *isn't* supposed\n # to increment the PC, thus we need to apply this offset to PC\n # that's still pointing at the 'jump' opcode. Hence the -2\n # modifier below.\n new_pc = self._opdecoder.program_counter + offset - 2\n self._opdecoder.program_counter = new_pc\n log(\"PC has changed from from %x to %x\" % (old_pc, new_pc))",
"def seek(self, offset):\n self.file_in.seek(offset)\n return",
"def viewport_offset(self, value):\n self.offset = value",
"def skip_to_offset(self, offset=0x01):\n if offset < self._offset:\n raise ValueError(\"Offset to move should be greater\"\n \"than current offset\")\n return self.skip_bytes(offset - self._offset)",
"def advance_offset(self, new_offset: int):\n assert not self._iterating\n assert new_offset <= self._offset + self._size\n while self._buffers and new_offset >= self._offset + len(self._buffers[0]):\n b = self._buffers.popleft()\n n = len(b)\n self._offset += n\n self._size -= n\n if new_offset > self._offset:\n n = new_offset - self._offset\n b = self._buffers[0]\n assert n < len(b)\n b = b[n:]\n self._buffers[0] = b\n self._offset += n\n self._size -= n\n assert self._offset == new_offset",
"def update_offset(self, new_offset):\r\n self.offset = new_offset",
"def offset(self, value):\n self._offset = value",
"def jump(self, pos):\n self.sonus.playlist_set_next(pos)\n self.sonus.playback_tickle()",
"def offset(self, offset):\n\n span = self\n if offset > 0:\n for i in range(offset):\n span = span.next_period()\n elif offset < 0:\n for i in range(-offset):\n span = span.prev_period()\n return span",
"def change_offset_page(self):\n try:\n if self.ui.rbt_configure_offsets.isChecked():\n self.ui.stw_offsets.setCurrentIndex(1)\n else:\n self.ui.stw_offsets.setCurrentIndex(0)\n except Exception:\n _traceback.print_exc(file=_sys.stdout)",
"def move(self, offset):\n if offset[0] or offset[1]:\n self.rect.move_ip(offset)\n self.maprect.move_ip(offset)",
"def getOffset(self, index: int) -> int:\n ...",
"def step(self, offset: int = None) -> bool:\n if self.current_slot is not None:\n raise AssertionError('current_slot invariant not true')\n\n self.current_slot = offset if offset is not None else -1\n return self.schedule()",
"def jumpToMarker(index, select):",
"def focus_next(self) -> None:\n if self.focus.index is not None:\n idx = self.focus.index + 1\n if self.inbounds(idx):\n self.focus.flow = self[idx]\n else:\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set focus to the next flow.
|
def focus_next(self) -> None:
if self.focus.index is not None:
idx = self.focus.index + 1
if self.inbounds(idx):
self.focus.flow = self[idx]
else:
pass
|
[
"def focus_next(self, window: Optional[wrappers.Window] = None) -> None:\n pass",
"def focus(self):\n self.node.focus()",
"def setFocus():\n pass",
"def set_focus(self):\n self.logger.info(f\"Set focus on element: {self.selectors}\")\n self.element.set_focus()",
"def set_focus_on_answer(self):\n self.answer_entry.focus()",
"def focus(self):\n self.dispatch('GoToCell', ('ToPoint', self.selector))",
"def focus(self):\n self.__run_js(Utils.qt_js_prepare('Qt.focus(\"{0}\")'.format(self.node_id)))",
"def set_current(self):\n if self.tabindex is not None:\n self.view.tabber.setCurrentIndex(self.tabindex)",
"def focus(self, locator):\n self._selenium.focus(locator)",
"def go(self, offset: int) -> None:\n if len(self) == 0:\n return\n if offset < 0:\n offset = len(self) + offset\n if offset < 0:\n offset = 0\n if offset > len(self) - 1:\n offset = len(self) - 1\n self.focus.flow = self[offset]",
"def set_focus(self, domain):\n self.domain_focus = domain\n self.save()\n if domain.next_technology_class_index is None:\n domain.select_next_technology()",
"def set_focus_real(self):\r\n Clock.schedule_once(self.set_focus, 0.5)",
"def focus(self):\n hover = ActionChains(self.driver).move_to_element(self._find_element())\n hover.click()\n hover.perform()",
"def focus_prev(self) -> None:\n if self.focus.index is not None:\n idx = self.focus.index - 1\n if self.inbounds(idx):\n self.focus.flow = self[idx]\n else:\n pass",
"def init_goto_line(self):\n self.local_state = State.GOTO_LINE\n self.setFocus()\n self.setValidator(self.int_validator)",
"def setFocus(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n return False",
"def set_focused(self):\n self.has_keyboard_focus = True",
"def force_focus(self):\n\n\t\tself.lift()\n\t\tself.focus_force()\n\t\tself.attributes(\"-topmost\", True)\n\t\tself.grab_set()",
"def change_focus(window):\n xdotool('windowactivate', window)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set focus to the previous flow.
|
def focus_prev(self) -> None:
if self.focus.index is not None:
idx = self.focus.index - 1
if self.inbounds(idx):
self.focus.flow = self[idx]
else:
pass
|
[
"def focus_prev(self, window: Optional[wrappers.Window] = None) -> None:\n pass",
"def setFocus():\n pass",
"def focus(self):\n self.node.focus()",
"def restore_previous_tab(self):\n\n if self._previous_tab:\n if not self.set_current_tab(self._previous_tab):\n self.set_current_index(0)\n else:\n self.set_current_index(0)",
"def reset_focus(self):\n\n self.set_focus()\n\n return self",
"def focus_next(self, window: Optional[wrappers.Window] = None) -> None:\n pass",
"def focus_next(self) -> None:\n if self.focus.index is not None:\n idx = self.focus.index + 1\n if self.inbounds(idx):\n self.focus.flow = self[idx]\n else:\n pass",
"def force_focus(self):\n\n\t\tself.lift()\n\t\tself.focus_force()\n\t\tself.attributes(\"-topmost\", True)\n\t\tself.grab_set()",
"def moveToPrevious(self):\n pass",
"def set_current(self):\n if self.tabindex is not None:\n self.view.tabber.setCurrentIndex(self.tabindex)",
"def change_focus(window):\n xdotool('windowactivate', window)",
"def set_focus_on_answer(self):\n self.answer_entry.focus()",
"def backWidget(self):\n self.setWidget('start')",
"def setFocus(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n return False",
"def focus(self):\n self.__run_js(Utils.qt_js_prepare('Qt.focus(\"{0}\")'.format(self.node_id)))",
"def to_prev_screen(self) -> None:\n if self.game_mode == 'comp' and self.num_players == 2:\n self.reset_num_screen()\n self.parent.current = 'menu'\n elif self.game_mode == 'game' or (self.game_mode == 'comp' and self.num_players > 2):\n self.reset_num_screen()\n self.parent.current = 'number'\n elif self.game_mode == 'solo':\n self.reset_goal_screen()\n self.parent.current = 'goal'\n self.clear_widgets(self.children[:-2])",
"def focus(self):\n self.dispatch('GoToCell', ('ToPoint', self.selector))",
"def on_lost_focus(self, event):\n event.Skip()\n self.shell_obj._field_lost_focus()",
"def set_focus_real(self):\r\n Clock.schedule_once(self.set_focus, 0.5)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the current view order.
|
def set_order(self, order_key: str) -> None:
if order_key not in self.orders:
raise exceptions.CommandError(
"Unknown flow order: %s" % order_key
)
order_key = self.orders[order_key]
self.order_key = order_key
newview = sortedcontainers.SortedListWithKey(key=order_key)
newview.update(self._view)
self._view = newview
|
[
"def set_render_order(self, order):\n self._set_render_order(order)",
"def set_module_order(self, order):\n with self.order_lock:\n self.module_order.set(order)\n self._listeners.notify(\"order\")\n self._listeners.notify(\"dependency\")",
"def defineSlideOrder(self, slide_order):\n self.slide_order = slide_order",
"def evaluation_order(self, evaluation_order):\n\n self.container['evaluation_order'] = evaluation_order",
"def set_cursor_order(self, order):\n if order is not None:\n new_cursor_pos=format.order_to_pos(str(self.text()),order)\n self.setCursorPosition(new_cursor_pos)",
"def set_partialOrdering(self, partialOrder):\n self.partialOrdering = partialOrder",
"def run_order(self, run_order):\n\n self._run_order = run_order",
"def set_rank_order(order):\n global RANK_ORDER\n RANK_ORDER = order",
"def save(self):\n self.enabler.storeOrder()",
"def SetNewManualOrder(self):\n self.sortMode = \"manual\"\n self.isManuallySorted = True\n self.lastManuallySortedEntries = self.entries\n \n self.ManualSortingEnabled.emit()",
"def save(self):\n order = self.context['order']\n order.place_order()",
"def setOrder(order):\n ierr = c_int()\n lib.gmshModelMeshSetOrder(\n c_int(order),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshSetOrder returned non-zero error code: \",\n ierr.value)",
"def setLayerGroupOrder( self, index ):\r\n\t\tself.metaData().setValue( 'groupOrder', index + 1 )\r\n\t\treturn True",
"def setSortedObjectOrderStrategy(self, *args):\n return _coin.SoGLRenderAction_setSortedObjectOrderStrategy(self, *args)",
"def setView(self, v):\n self.view = v",
"def evalOrderUIreorderIK():\n global gEvalOrder\n\n # Check window\n win = 'evaluationOrderUI'\n if not cmds.window(win, q=True, ex=True):\n raise UserInputError('Evaluation Order UI is not open!!')\n\n # Reorder using IK\n gEvalOrder.ikReorder()\n\n # Display evaluation order list in UI\n evalOrderUIrefreshList()",
"def render_order(self):\n ret_val = self._render_order()\n return ret_val",
"def views_on_top(self):\n for view in self._views:\n view.on_top()",
"def set_zonal_order(self, order=7):\n self.set_abstract_item(\"General\", \"Zonal order\", order)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Clears both the store and view.
|
def clear(self) -> None:
self._store.clear()
self._view.clear()
self.sig_view_refresh.send(self)
self.sig_store_refresh.send(self)
|
[
"def clear(self):\n self.collection.clear()",
"def clear(self):\n self.models = {}\n self.model_ids = []",
"def clear (self):\n for object in self._objects[:]:\n object.destroy ()\n self._objects = []",
"def clear(self):\n self.msg_store = ''",
"def clear_all(self):\n self._cache.clear()\n return self",
"def on_clear_clicked(self, obj):\n self.book_model.clear()\n self.book.clear()",
"def clear(self):\r\n self.cmdset_stack = [self.cmdset_stack[0]]\r\n self.mergetype_stack = [self.cmdset_stack[0].mergetype]\r\n storage = self.obj.cmdset_storage\r\n if storage:\r\n storage = storage[0]\r\n self.obj.cmdset_storage = storage\r\n self.update()",
"def clear(self):\n\t\tself.stack_widget.clear()\n\t\tself.frame_widget.clear()",
"def clear(self):\n pyamf.BaseContext.clear(self)\n\n self.amf3_objs = []\n self.rev_amf3_objs = {}\n\n if hasattr(self, 'amf3_context'):\n self.amf3_context.clear()",
"def clear(self):\n self._actions = []\n self._action_map = None",
"def clear_cart(self):\n self.cart.clear()",
"def clear(self):\n self.shapes.clear()",
"def clear_collections(self):\n with MongoDB() as database:\n database[\"products\"].drop()\n database[\"customers\"].drop()\n database[\"rentals\"].drop()",
"def clear_actions(self):\n\t\t\n\t\tself.b.blivet_reset()\n\t\t\n\t\tself.history.clear_history()\n\t\t\n\t\tself.list_devices.update_devices_view()\n\t\tself.update_partitions_view(self.disk)",
"def clear(self):\n self.listwalker.clear()",
"def clearAll(self):\n\t\tself.faceSnapShot = None #This is the state of the HappyFace to which all the expressions are compared\n\t\tself.expressionLibrary = []",
"def delete(self):\n if self._store:\n self._store.delete(self.key)",
"def clear(self):\r\n with self._hlock: \r\n self.handlers.clear()\r\n with self._mlock: \r\n self.memoize.clear()",
"def clear(self) -> None:\n removed = self.storage.clear_entry_data()\n self.storage.clear_last_post_time()\n logging.info(f\"Removed {removed} entries\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Clears only the unmarked flows.
|
def clear_not_marked(self) -> None:
for flow in self._store.copy().values():
if not flow.marked:
self._store.pop(flow.id)
self._refilter()
self.sig_store_refresh.send(self)
|
[
"def remove(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:\n for f in flows:\n if f.id in self._store:\n if f.killable:\n f.kill()\n if f in self._view:\n # We manually pass the index here because multiple flows may have the same\n # sorting key, and we cannot reconstruct the index from that.\n idx = self._view.index(f)\n self._view.remove(f)\n self.sig_view_remove.send(self, flow=f, index=idx)\n del self._store[f.id]\n self.sig_store_remove.send(self, flow=f)\n if len(flows) > 1:\n ctx.log.alert(\"Removed %s flows\" % len(flows))",
"def clear (self):\n self.__statuses.clear()",
"def clear_blocks(self):\n self.blocks = []\n self._center_block = None",
"def ClearUnread(self):\n self.unread = 0",
"def clear(self):\n self.stack = list()",
"def clearFlags(self):\n\n self.sorted = False\n self.statted = False",
"def clear(self):\n\n for node in self._nodes:\n node.clear()",
"def erase(self):\n self._evidence = [None] * len(self.ground_atoms)",
"def clear(self):\n self.collected = []",
"def clearLayout(self):\n for index in range(self.flowLayout.count()):\n if self.flowLayout.itemAt(index).widget():\n self.flowLayout.itemAt(index).widget().deleteLater()",
"def clear(self):\n self._actions = []\n self._action_map = None",
"def clear(self):\n for _, b in self.blocks.items():\n b.clear()\n\n self.frame_start_time = None",
"def clear_messages(self):\n for e in self.edges:\n e.clear_messages()",
"def clearWorkflow(self):\n\n self.mongoCmd(N.clearWorkflow, N.workflow, N.delete_many, {})",
"def reset(self):\n logging.warn('dropping BFS state, queue depth = %d',\n self._redis.llen(self.QUEUE_NS))\n self._redis.delete(self.QUEUE_NS, self.VISITED_NS)",
"def clearSets(self):\r\n self.matchSet = []\r\n self.correctSet = []",
"def clear_all(self):\n data = self.Entries\n del data[:]",
"def clear(self):\n\n\t\tself.__fp_cuboids = []\n\t\tself.__fp_rec_errors = []",
"def clear(self):\n self.listwalker.clear()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a value from the settings store for the specified flow.
|
def getvalue(self, flow: mitmproxy.flow.Flow, key: str, default: str) -> str:
return self.settings[flow].get(key, default)
|
[
"def __get_value_from_datastore(name):\n # type: (str) -> str\n setting = GaeEnvSettings.query(\n GaeEnvSettings.name == str(name)).get() # type: GaeEnvSettings\n if not setting:\n return None\n return setting.value # type: str",
"def get_value(self, key):\n if self.settings.has_key(key):\n return self.settings[key]\n else:\n return None",
"def get(self, setting):\n return self.settings.get(setting, \"\")",
"def get(name):\n value = Configuration.settings.get(name, None)\n\n if value is None:\n raise ConfigurationNotFound(name)\n\n return value",
"def get_by_id(self, flow_id: str) -> typing.Optional[mitmproxy.flow.Flow]:\n return self._store.get(flow_id)",
"def _getSetting(self, settingName):\r\n setting = Setting.objects.get(name__name=settingName).value\r\n return setting",
"def _get_from_backend(self, parameter, section):\n value = None\n try:\n value = self.config_backend.get(section, parameter.id)\n except (NoOptionError, NoSectionError):\n # Ignore, we return None.\n pass\n return value",
"def getPolicyFlowSetting(self, **kwargs):\n\n allParams = []\n\n params = locals()\n for (key, val) in list(params['kwargs'].items()):\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method getPolicyFlowSetting\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/policy/flow/setting'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = 'application/json'\n\n \n\n \n\n \n\n \n\n \n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)\n\n \n if not response:\n return None\n\n responseObject = self.apiClient.deserialize(response, 'PolicyFlowSettingResult')\n return responseObject",
"def get_setting(self, name):\r\n\r\n if name not in self._settings_definitions or \\\r\n name not in self._settings_running_registry:\r\n raise SettingNotFound, \"setting '%s' not found\" % (name)\r\n\r\n return self._settings_running_registry[name]",
"def get(self, botconf, cat=None):\n setting = botconf.get(self.name)\n return setting if (setting is not None) else self.default",
"def get_value(self, config_field):\n raise NotImplementedError",
"def get_settings(self, field, settings, **kws):\n handle = kws.get('handle', 'setpoint')\n fld = self.get_field(field)\n sp_vals = [settings.get(sp) for sp in getattr(fld, handle)]\n if None in sp_vals:\n _LOGGER.warning(\n \"Failed to get {} PV reading(s) of '{} [{}]'.\".format(handle, self.name, field))\n print(\n \"Failed to get {} PV reading(s) of '{} [{}]'.\".format(handle, self.name, field))\n return None\n return fld.read_policy([Number(float(x)) for x in sp_vals])",
"def get(ctx, setting):\n print(f\"{ctx.obj.config.dump(setting)}\")",
"def get_value(self, key: str):\n try:\n return self._config_contents[key]\n except KeyError:\n print(f\"Could not find the desired key: {key} in the config file\")",
"def to_frontend_value(cfg):\n if cfg.key == CACHE_TIMEOUT:\n return cfg.value.total_seconds()\n elif cfg.key == INCLUDE_FACULTY:\n return cfg.value\n elif cfg.key == INCLUDE_RESIDENTS:\n return cfg.value\n else:\n return None",
"def get_value(field):\n try:\n current_value = subprocess.check_output([\"ovs-vsctl\", \"get\", \"Open_vSwitch\", \".\", \"other_config:{}\".format(field)])\n except Exception:\n return None\n return current_value.lstrip('\\\"').strip('\\n').rstrip('\\\"')",
"async def get_external(self, key, default=_EmptyArg):\n try:\n value = await self.external_backend.get(key, default)\n except Exception:\n logger.warning(f\"Setting {key} not found in external backend\")\n raise SettingNotFound(key)\n else:\n await self.internal_backend.set(key, value)\n return value",
"def read(self):\n if not self.allow_read:\n logger.error(f\"Unauthorized read access in property {self.id()}!\")\n return None\n return self.value",
"def setting(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"setting\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set a value in the settings store for the specified flows.
|
def setvalue(
self,
flows: typing.Sequence[mitmproxy.flow.Flow],
key: str, value: str
) -> None:
updated = []
for f in flows:
self.settings[f][key] = value
updated.append(f)
ctx.master.addons.trigger("update", updated)
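
A minimal usage sketch for the setvalue method above, paired with the getvalue accessor shown earlier in this section; the import paths, the test helpers, and the "note" key are assumptions for illustration, not taken from this file:

from mitmproxy.addons import view
from mitmproxy.test import taddons, tflow

v = view.View()
with taddons.context(v):                     # supplies the ctx.master the addon expects
    f = tflow.tflow()                        # a synthetic HTTP flow
    v.add([f])                               # the settings store is keyed by flows in the view
    v.setvalue([f], "note", "needs replay")
    assert v.getvalue(f, "note", "") == "needs replay"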
|
[
"def set(ctx, setting, value):\n ctx.obj.config.set(setting, value)\n ctx.obj.config.save()",
"def set(name, value):\n Configuration.settings[name] = value",
"def set(\n ctx: Context, type: str, encrypted: bool, setting: str, value: str = None\n ):\n cli_context: CliContext = ctx.obj\n cli_context.get_configuration_dir_state().verify_command_allowed(\n AppcliCommand.CONFIGURE_SET\n )\n\n # Check if value was not provided\n if value is None:\n value = click.prompt(\"Please enter a value\", type=str)\n\n # Transform input value as type\n transformed_value = StringTransformer.transform(value, type)\n\n # We don't support encrypting non-string-typed values yet, so error and exit.\n if encrypted and not isinstance(transformed_value, str):\n error_and_exit(\n \"Cannot encrypt a non-string-typed value. Exiting without setting value.\"\n )\n\n # Set settings value\n final_value = (\n encrypt_text(cli_context, transformed_value)\n if encrypted\n else transformed_value\n )\n\n configuration = ConfigurationManager(cli_context, self.cli_configuration)\n configuration.set_variable(setting, final_value)\n\n logger.debug(\n f\"Successfully set variable [{setting}] to [{'### Encrypted Value ###' if encrypted else value}].\"\n )",
"def set(value,force=False):",
"def change_settings(self, index, setting, value):\r\n self.__add_action(index, ('settings', setting, value))",
"def set(key, value):\n\tglobals()[key] = value\n\tuser_conf.save(key, value)",
"def set_value(self, channel, val):\n # print(\"SETTING\")\n epics.PV(channel).put(val)\n return",
"def setValue(self, *args):\n return _coin.SoSFPlane_setValue(self, *args)",
"def setValue(self, *args):\n return _coin.SoSFEngine_setValue(self, *args)",
"def set(self, key, value):\n self.config[key] = value\n self.saveConfig()",
"def set_settings(channel_id, ques_set, no):\n # Sets the passed settings in the math bot config file\n check_server(channel_id)\n db = sqlite3.connect('Ranking/Rankings.db')\n adder = db.cursor()\n adder.execute('UPDATE servers SET Question = ? WHERE ServerID = ?', (no, channel_id))\n adder.execute('UPDATE servers SET Folder = ? WHERE ServerID = ?', (ques_set, channel_id))\n db.commit()\n db.close()",
"def set_value(self,value):\n self.node.set(value)",
"def set(self, workflow_id: str, key: str, value: Optional[str]) -> None:\n raise NotImplementedError",
"def test_store_settings_value(self):\n settings['version'] = '1'\n self.assertEqual('1', settings['version'])",
"def send_flows_to_dp_by_id(self, valve, flows):\n flows = valve.prepare_send_flows(flows)\n self.last_flows_to_dp[valve.dp.dp_id] = flows",
"def direct_set(self, key: str, value):\n set_store_value(self.store, key, value)",
"def set(key, value):\n db_row = SiteConfiguration.query.filter_by(key=key).one_or_none()\n if db_row is None:\n db_row = SiteConfiguration(key, value)\n db.session.add(db_row)\n else:\n db_row.value = value\n db.session.commit()\n memo[key] = value",
"def set_system_setting(self, setting: SystemSettings, value: str):\n\t\tquery_str = [\n\t\t\t\"UPDATE system_settings SET value_text = '{0}'\",\n\t\t\t\"WHERE key_name = '{1}';\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", query_str).format(value, setting.name)\n\t\t)\n\t\tself.conn.commit()\n\t\tif self.c.rowcount <= 0:\n\t\t\tquery_str = [\n\t\t\t\t\"INSERT INTO system_settings (key_name, value_text)\",\n\t\t\t\t\"VALUES ('{0}', '{1}');\"\n\t\t\t]\n\t\t\tself.c.execute(\n\t\t\t\tstr.join(\" \", query_str).format(setting.name, value)\n\t\t\t)\n\t\t\tself.conn.commit()\n\n\t\t### END of system_settings functiones ###\n\n\t\t### Start feeding_logs functiones ###",
"def setValue(self, *args):\n return _coin.SoSFNode_setValue(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Duplicates the specified flows, and sets the focus to the first duplicate.
|
def duplicate(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:
dups = [f.copy() for f in flows]
if dups:
self.add(dups)
self.focus.flow = dups[0]
ctx.log.alert("Duplicated %s flows" % len(dups))
|
[
"def update(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:\n for f in flows:\n if f.id in self._store:\n if self.filter(f):\n if f not in self._view:\n self._base_add(f)\n if self.focus_follow:\n self.focus.flow = f\n self.sig_view_add.send(self, flow=f)\n else:\n # This is a tad complicated. The sortedcontainers\n # implementation assumes that the order key is stable. If\n # it changes mid-way Very Bad Things happen. We detect when\n # this happens, and re-fresh the item.\n self.order_key.refresh(f)\n self.sig_view_update.send(self, flow=f)\n else:\n try:\n idx = self._view.index(f)\n except ValueError:\n pass # The value was not in the view\n else:\n self._view.remove(f)\n self.sig_view_remove.send(self, flow=f, index=idx)",
"def __duplicate(self):\n\n guides = self.__validate()\n\n top = []\n for _guide in guides:\n\n nodes = guide.duplicate(_guide, hierarchy=True)\n top.append(nodes[0])\n\n # Select top guides\n cmds.select(top, r=True)",
"def focus_next(self) -> None:\n if self.focus.index is not None:\n idx = self.focus.index + 1\n if self.inbounds(idx):\n self.focus.flow = self[idx]\n else:\n pass",
"def remove(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:\n for f in flows:\n if f.id in self._store:\n if f.killable:\n f.kill()\n if f in self._view:\n # We manually pass the index here because multiple flows may have the same\n # sorting key, and we cannot reconstruct the index from that.\n idx = self._view.index(f)\n self._view.remove(f)\n self.sig_view_remove.send(self, flow=f, index=idx)\n del self._store[f.id]\n self.sig_store_remove.send(self, flow=f)\n if len(flows) > 1:\n ctx.log.alert(\"Removed %s flows\" % len(flows))",
"def send_flows_to_dp_by_id(self, valve, flows):\n flows = valve.prepare_send_flows(flows)\n self.last_flows_to_dp[valve.dp.dp_id] = flows",
"def focus_next(self, window: Optional[wrappers.Window] = None) -> None:\n pass",
"def reset_flow_lists(self):\n # list of generated inter-arrival times, flow sizes, and data rates for the entire episode\n # dict: ingress_id --> list of arrival times, sizes, drs\n self.flow_arrival_list = []\n self.flow_size_list = []\n self.flow_dr_list = []\n self.flow_list_idx = 0\n self.last_arrival_sum = 0",
"def allow_duplicates(self, allow_duplicates):\n\n self._allow_duplicates = allow_duplicates",
"def get_dup_seqs (self):\n if len(self) != 0:\n for p in self.flow.seqs[self.tcp.seq]:\n if p is not self:\n if len(p) != 0:\n yield p",
"def focus_prev(self) -> None:\n if self.focus.index is not None:\n idx = self.focus.index - 1\n if self.inbounds(idx):\n self.focus.flow = self[idx]\n else:\n pass",
"def moveToFirst(self):\n pass",
"def bindWorkflow( self, id, types=None ):\n if types is not None:\n for typ in types:\n if self._chains_by_type is None:\n chain=[]\n else:\n chain = self._chains_by_type.get(typ, [])\n chain = list(chain)\n chain.append(id)\n self.setChainForPortalTypes( ( typ, ), chain )\n else:\n # Add workflow to the default chain\n chain = self._default_chain or []\n if id not in chain:\n chain = list(chain)\n chain.append(id)\n self._default_chain = tuple(chain)",
"def allow_dup(self, allow_dup):\n\n self._allow_dup = allow_dup",
"def single_flow_executor(self, flow):\n for node in flow:\n if self.double_linked_graph[node].outputs is not None:\n continue\n else:\n node_output = self.node_executor(node)\n self.double_linked_graph[node].outputs = node_output",
"def remove_duplicates(self) -> None:\n for i in range(self.length()):\n current = self.sentinel.next\n for j in range(0, self.length() - i - 1):\n if current.value == current.next.value:\n while self.count(current.value) > 0:\n self.remove(current.value)\n current = current.prev\n current = current.next",
"def test_add_passage_with_duplicates(self):\n wf = WordFilter.create_default_filter()\n wf.add_passage(\"Hello World\", None)\n wf.add_passage(\"Hello World\", None)\n\n # Makes sure that the hashtable does not add duplicates.\n self.assertEqual(wf.hash_table[4][\"next\"], None)",
"def duplicated(self, d):\n self._duplicate = d",
"def restore_connectors(self, proxyItem_list, clear_first=False): #Does not take care of connections to items not in the list, that is, deleting them.\n if clear_first:\n for proxyItem in proxyItem_list:\n newlist = [] \n for cnctr in proxyItem.connectorList: \n if (cnctr.itemA in proxyItem_list and\n cnctr.itemB in proxyItem_list):\n# cnctr.hide()\n if cnctr.scene() == self: self.removeItem(cnctr)\n else: newlist.append(cnctr)\n proxyItem.connectorList = newlist\n \n for proxyItem in proxyItem_list: \n for input_name, parent_tuple in proxyItem.fltr.filter_inputs.iteritems(): \n for parentProxy in proxyItem_list:\n if parent_tuple[0] == parentProxy.fltr:\n inputProxy = proxyItem\n outputProxy = parentProxy\n output_name = parent_tuple[1]\n \n break_outer = False\n if not clear_first:\n for cnctr in outputProxy.connectorList:\n if ((cnctr.itemA,cnctr.itemB) == (outputProxy,inputProxy) and\n (cnctr.radioA_name,cnctr.radioB_name) == (output_name,input_name)): #If there already is a connector\n break_outer = True \n break\n if break_outer:\n continue\n \n start_x, start_y = outputProxy.get_radioPos(OUTPUT_LAYOUT, output_name)\n end_x, end_y = inputProxy.get_radioPos(INPUT_LAYOUT, input_name) \n startPos = QPointF(outputProxy.scenePos().x()+start_x, \n outputProxy.scenePos().y()+start_y)\n endPos = QPointF(inputProxy.scenePos().x()+end_x, \n inputProxy.scenePos().y()+end_y) \n connector = Connector( QLineF(startPos, endPos), \n itemA = outputProxy, \n local_posA = QPointF(start_x,start_y),\n itemB = inputProxy,\n local_posB = QPointF(end_x,end_y) )\n connector.radioA_layout = OUTPUT_LAYOUT\n connector.radioB_layout = INPUT_LAYOUT\n connector.radioA_name = output_name\n connector.radioB_name = input_name\n inputProxy.connectorList.append(connector)\n outputProxy.connectorList.append(connector)\n connector.setPen(QPen())\n self.addItem(connector)\n self.scene_update_sig.emit()",
"def reset_focus(self):\n\n self.set_focus()\n\n return self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Removes the given flows from the underlying store and the view.
|
def remove(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:
for f in flows:
if f.id in self._store:
if f.killable:
f.kill()
if f in self._view:
# We manually pass the index here because multiple flows may have the same
# sorting key, and we cannot reconstruct the index from that.
idx = self._view.index(f)
self._view.remove(f)
self.sig_view_remove.send(self, flow=f, index=idx)
del self._store[f.id]
self.sig_store_remove.send(self, flow=f)
if len(flows) > 1:
ctx.log.alert("Removed %s flows" % len(flows))
|
[
"def clear(self) -> None:\n self._store.clear()\n self._view.clear()\n self.sig_view_refresh.send(self)\n self.sig_store_refresh.send(self)",
"def teardown_with(self, flow):\n from x2py.flow import Flow\n backup = Flow.thread_local.current\n Flow.thread_local.current = flow\n\n self._teardown()\n\n Flow.thread_local.current = backup\n\n self.cleanup() # eventsink cleanup",
"def clearLayout(self):\n for index in range(self.flowLayout.count()):\n if self.flowLayout.itemAt(index).widget():\n self.flowLayout.itemAt(index).widget().deleteLater()",
"def delete_tap_flow(self, tap_flow, ignore_missing=True):\n self._delete(\n _tap_flow.TapFlow, tap_flow, ignore_missing=ignore_missing\n )",
"def clearWorkflow(self):\n\n self.mongoCmd(N.clearWorkflow, N.workflow, N.delete_many, {})",
"def dropWorkflow(self):\n\n self.mongoCmd(N.dropWorkflow, N.workflow, N.drop)",
"def clear_not_marked(self) -> None:\n for flow in self._store.copy().values():\n if not flow.marked:\n self._store.pop(flow.id)\n\n self._refilter()\n self.sig_store_refresh.send(self)",
"def remove_view(self,view):\n self._views.remove(view)\n self.model.remove_property_change_listener(view)",
"def req_remove_flow(self, msg):\n msg.__class__ = DR2DPMessageRemoveFlow\n try:\n msg.unpack()\n except:\n self.log.warn('invalid remove_flow message')\n return\n\n if self.click_interface != None:\n self.click_interface.send_msg_to_dr(msg.pack())",
"def remove(self):\n self.layers.pop()",
"def teardown_workflow(self):\n del self.sca_preproc",
"def removefstore(self, vfs, fstore, fpg=None):",
"def delete(self):\n delete_stack(self)",
"def clear(self):\n self.collection.clear()",
"def disarm(self):\n self.act = None\n Framework.removeTimeEvent(self)",
"def delete(self):\n if self._store:\n self._store.delete(self.key)",
"def clear_input_output_flows(sut, br_name):\n sut.vswitch.execute(f\"ovs-ofctl del-flows {br_name} table={Constants.OF_TABLE_INPUT}\")\n sut.vswitch.execute(f\"ovs-ofctl del-flows {br_name} table={Constants.OF_TABLE_OUTPUT}\")",
"def clear_tree(self):\n self.tree_adapter = None\n self._tree = None\n self._clear_scene()",
"def unpublish(self, cls):\r\n self.classes.pop(cls, None)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resolve a flow list specification to an actual list of flows.
|
def resolve(self, flow_spec: str) -> typing.Sequence[mitmproxy.flow.Flow]:
if flow_spec == "@all":
return [i for i in self._store.values()]
        elif flow_spec == "@focus":
return [self.focus.flow] if self.focus.flow else []
elif flow_spec == "@shown":
return [i for i in self]
elif flow_spec == "@hidden":
return [i for i in self._store.values() if i not in self._view]
elif flow_spec == "@marked":
return [i for i in self._store.values() if i.marked]
elif flow_spec == "@unmarked":
return [i for i in self._store.values() if not i.marked]
else:
filt = flowfilter.parse(flow_spec)
if not filt:
raise exceptions.CommandError(
"Invalid flow filter: %s" % flow_spec)
return [i for i in self._store.values() if filt(i)]
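
A minimal sketch of the specification strings handled above; any string that is not one of the @-keywords is parsed as a flow filter expression (import paths and test helpers are assumptions for illustration):

from mitmproxy.addons import view
from mitmproxy.test import taddons, tflow

v = view.View()
with taddons.context(v):
    f = tflow.tflow()                        # a synthetic GET request flow
    f.marked = True
    v.add([f])
    assert v.resolve("@all") == [f]
    assert v.resolve("@marked") == [f]
    assert v.resolve("@unmarked") == []
    assert v.resolve("~m GET") == [f]        # ordinary filter syntax, here: method is GET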
|
[
"def get_flow_list(self):\n if (self.host_url != None) and (self.flow_id != None):\n url = self.host_url + 'flow/' + str(self.flow_id)\n\n # get flow list\n try:\n result = requests.get(url) # GET\n\n except Exception as err:\n self.flow_list = None\n return None\n\n \n # parse response json\n try:\n result = json.loads(result.text)\n\n except ValueError as err:\n return None\n \n else:\n # set flow_list\n if 'nodes' in result:\n self.flow_list = result['nodes']\n return self.flow_list\n else:\n self.flow_list = None\n return None\n \n else:\n self.flow_list = None\n return None",
"def flowmods_from_flows(flows):\n return [flow for flow in flows if isinstance(flow, valve_of.parser.OFPFlowMod)]",
"def resolve_list(value):\n if value is None:\n return []\n\n if isinstance(value, list):\n return [value]\n\n return value",
"def _parse_list(self, inputs):\n # Lists can only be used as inputs in the case where there is a single input node.\n # Validate that this is true. If so, resolve the list into a dict and parse it.\n input_nodes = self.get_nodes_by_role(NodeRole.INPUT)\n if len(input_nodes) == 1:\n _inputs = {next(iter(input_nodes)): inputs}\n else:\n raise CompositionError(\n f\"Inputs to {self.name} must be specified in a dictionary with a key for each of its \"\n f\"{len(input_nodes)} INPUT nodes ({[n.name for n in input_nodes]}).\")\n input_dict, num_inputs_sets = self._parse_dict(_inputs)\n return input_dict, num_inputs_sets",
"def flow_list(\n roles: List[FlowRole] = typer.Option(\n [FlowRole.created_by],\n \"--role\",\n \"-r\",\n help=\"Display Flows where you have the selected role. [repeatable]\",\n case_sensitive=False,\n show_default=True,\n ),\n marker: str = typer.Option(\n None,\n \"--marker\",\n \"-m\",\n help=\"A pagination token for iterating through returned data.\",\n ),\n per_page: int = typer.Option(\n None,\n \"--per-page\",\n \"-p\",\n help=\"The page size to return. Only valid when used without providing a marker.\",\n min=1,\n max=50,\n ),\n flows_endpoint: str = typer.Option(\n PROD_FLOWS_BASE_URL,\n hidden=True,\n callback=flows_endpoint_envvar_callback,\n ),\n verbose: bool = verbosity_option,\n output_format: FlowDisplayFormat = typer.Option(\n FlowDisplayFormat.json,\n \"--format\",\n \"-f\",\n help=\"Output display format.\",\n case_sensitive=False,\n show_default=True,\n ),\n):\n fc = create_flows_client(CLIENT_ID, flows_endpoint)\n flows = fc.list_flows(\n roles=[r.value for r in roles], marker=marker, per_page=per_page\n )\n\n _format_and_display_flow(flows, output_format, verbose=verbose)",
"def _resolve_rules(self, rules_node: Union[dict, str]) -> list:\n # verify whether the node is a string or a ref to a parameter\n if isinstance(rules_node, dict) and rules_node.get('Ref', None):\n # it's a ref then we lookup the info from the parameters\n data = self._template_params.get(rules_node['Ref'], None)\n elif isinstance(rules_node, str):\n data = rules_node\n elif isinstance(rules_node, list):\n data = rules_node\n else:\n raise ValueError(f'Not a valid value for Rules: {rules_node}')\n\n # if the data is a plan string we split it as comma separated\n if isinstance(data, str):\n rules = [elem.strip() for elem in data.split(',')]\n elif isinstance(data, list):\n rules = data\n else:\n raise ValueError(f'Unsupported data in rules. Data: {data}')\n return rules",
"def BgpFlowSpecRangesList(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpflowspecrangeslist import BgpFlowSpecRangesList\n return BgpFlowSpecRangesList(self)._select()",
"def resolve(self, list, definition_context):\n last = definition_context.attributes.get(list.name)\n\n if last is not None and last.can_inherit():\n last.add(list)\n\n # Inherit inheritance rules.\n last.inherit = list.inherit\n\n definition_context.add_attribute(last)\n\n elif last is None:\n definition_context.add_attribute(list)",
"def test_lazy_parse_sff_handle(self):\n flows, head = lazy_parse_sff_handle(self.rec)\n flows = list(flows)\n self.assertEqual(len(flows),2)\n self.assertEqual(len(head), 11)\n self.assertEqual(head['Key Length'], '4')\n self.assertEqual(head['Key Sequence'], 'TCAG')\n self.assertEqual(flows[0].Name, 'FIQU8OX05GCVRO')\n self.assertEqual(flows[1].Name, 'FIQU8OX05F8ILF')",
"def _resolve_and_filter_type_list(self, typelist):\n # Create a copy we'll modify\n new_typelist = list(typelist)\n for typeval in typelist:\n resolved = self._transformer.resolve_type(typeval)\n if not resolved:\n new_typelist.remove(typeval)\n return new_typelist",
"def getFlows(self, **kwargs):\n\n allParams = ['offset', 'limit']\n\n params = locals()\n for (key, val) in list(params['kwargs'].items()):\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method getFlows\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/policy/flow'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = 'application/json'\n\n \n if ('offset' in params):\n queryParams['offset'] = self.apiClient.toPathValue(params['offset'])\n \n if ('limit' in params):\n queryParams['limit'] = self.apiClient.toPathValue(params['limit'])\n \n\n \n\n \n\n \n\n \n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)\n\n \n if not response:\n return None\n\n responseObject = self.apiClient.deserialize(response, 'FlowListResult')\n return responseObject",
"def reset_flow_lists(self):\n # list of generated inter-arrival times, flow sizes, and data rates for the entire episode\n # dict: ingress_id --> list of arrival times, sizes, drs\n self.flow_arrival_list = []\n self.flow_size_list = []\n self.flow_dr_list = []\n self.flow_list_idx = 0\n self.last_arrival_sum = 0",
"def resolve(self, flatten=True):\n def _recur(ind):\n if isinstance(ind, RuleIndex) and self[ind] is not None:\n b = []\n l = len(self[ind])-1\n for i, item in enumerate(self[ind]):\n if item is None:\n continue\n if i == 0:\n b.append(_recur(item[0]))\n b.append(_recur(item[1]))\n elif i == l:\n b.append(_recur(item[1]))\n else:\n b.append(_recur(item[1]))\n return b\n else:\n return ind\n \n # start from main sequence / first rule\n items = [_recur(item[1]) for item in self[0]]\n # should we flatten the result?\n return flatten_list(items) if flatten else items",
"def flow_actions_list(\n flow_id: str = typer.Option(\n ...,\n help=\"The ID for the Flow which triggered the Action.\",\n prompt=True,\n ),\n flow_scope: str = typer.Option(\n None,\n help=\"The scope this Flow uses to authenticate requests.\",\n callback=url_validator_callback,\n ),\n roles: List[ActionRole] = typer.Option(\n None,\n \"--role\",\n help=\"Display Actions where you have the selected role. [repeatable]\",\n ),\n statuses: List[ActionStatus] = typer.Option(\n None,\n \"--status\",\n help=\"Display Actions with the selected status. [repeatable]\",\n ),\n marker: str = typer.Option(\n None,\n \"--marker\",\n \"-m\",\n help=\"A pagination token for iterating through returned data.\",\n ),\n per_page: int = typer.Option(\n None,\n \"--per-page\",\n \"-p\",\n help=\"The page size to return. Only valid when used without providing a marker.\",\n min=1,\n max=50,\n ),\n flows_endpoint: str = typer.Option(\n PROD_FLOWS_BASE_URL,\n hidden=True,\n callback=flows_endpoint_envvar_callback,\n ),\n verbose: bool = verbosity_option,\n):\n fc = create_flows_client(CLIENT_ID, flows_endpoint)\n\n # This None check and check makes me unhappy but is necessary for mypy to\n # be happy with the enums. If we can figure out what defaults flows uses\n # for flow role/status queries, we can set those here and be done\n statuses_str, roles_str = None, None\n if statuses is not None:\n statuses_str = [s.value for s in statuses]\n if roles is not None:\n roles_str = [r.value for r in roles]\n\n action_list = fc.list_flow_actions(\n flow_id,\n flow_scope,\n statuses=statuses_str,\n roles=roles_str,\n marker=marker,\n per_page=per_page,\n )\n format_and_echo(action_list, verbose=verbose)",
"def test_workflow_verify_rulelist_pass(self, client):\n steps = {\n 'step-1': None,\n 'step-2': None,\n 'step-3': None,\n }\n specification = '\\n'.join([\n '- actions:',\n ' - action: execute-step',\n ' step: step-1',\n ' - action: execute-step',\n ' step: step-2',\n ' - action: execute-operation',\n ' operation: flux:test-operation',\n ' condition: some condition',\n '- actions:',\n ' - action: execute-step',\n ' step: step-3',\n ])\n rulelist = RuleList.unserialize(specification)\n rulelist.verify(steps)",
"def BgpFlowSpecRangesListV4(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpflowspecrangeslistv4 import BgpFlowSpecRangesListV4\n return BgpFlowSpecRangesListV4(self)._select()",
"def provision_flows(sut, br_name, flows):\n for flow in flows:\n of_ver = ''\n if 'push_vlan' in flow:\n of_ver = '-O OpenFlow13'\n\n sut.vswitch.execute(f\"ovs-ofctl {of_ver} add-flow {br_name} {flow}\")\n\n sut.vswitch.execute(f\"ovs-ofctl dump-flows {br_name}\")",
"def BgpFlowSpecRangesListV6(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpflowspecrangeslistv6 import BgpFlowSpecRangesListV6\n return BgpFlowSpecRangesListV6(self)._select()",
"def resolve_literal_list(node, ctxt):\n val = []\n for e in node.elts:\n e = _resolve_literal(e, ctxt)\n if isinstance(e, ast.AST):\n return node\n val.append(e)\n if isinstance(node, ast.Tuple):\n return tuple(val)\n elif isinstance(node, ast.List):\n return list(val)\n elif isinstance(node, ast.Set):\n return set(val)\n else:\n raise TypeError(\"Attempted to resolve {} as if it were a literal list, tuple, or set\".format(node))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get flow with the given id from the store. Returns None if the flow is not found.
|
def get_by_id(self, flow_id: str) -> typing.Optional[mitmproxy.flow.Flow]:
return self._store.get(flow_id)
|
[
"def getFlow(self, **kwargs):\n\n allParams = ['id']\n\n params = locals()\n for (key, val) in list(params['kwargs'].items()):\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method getFlow\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/policy/flow/{id}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = 'application/json'\n\n \n\n \n\n \n if ('id' in params):\n replacement = str(self.apiClient.toPathValue(params['id']))\n replacement = urllib.parse.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'id' + '}',\n replacement)\n \n\n \n\n \n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)\n\n \n if not response:\n return None\n\n responseObject = self.apiClient.deserialize(response, 'FlowResult')\n return responseObject",
"def flow_with(self, fac_id):\n\t\treturn self.flows[fac_id]",
"def get_by_id(self, id: str) -> Optional[Activity]:\n return project_context.database[\"activities\"].get(id)",
"def get_by_id(self, id):\n rulesets = [ruleset for ruleset in self.filter() if ruleset.id == id]\n if rulesets:\n return rulesets[0]\n return None",
"def find(self, steam_id: str) -> Optional[AclEntry]:",
"def get_flow(self, flow_name: str) -> \"Flow\":\n if flow_name not in self.flows:\n raise ValueError(\"Flow is not contained in this Storage\")\n\n with TemporaryGitRepo(\n git_clone_url=self.git_clone_url,\n branch_name=self.branch_name,\n tag=self.tag,\n commit=self.commit,\n clone_depth=self.clone_depth,\n ) as temp_repo:\n flow = extract_flow_from_file(\n file_path=os.path.join(temp_repo.temp_dir.name, self.flow_path),\n flow_name=self.flow_name,\n )\n return flow",
"def station_by_id(self, id):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if _[\"properties\"][\"station_id\"] == id]\n log.debug(\"searching for station_id {} found {}\".format(id, station))\n return station[0]\n except:\n log.debug(\"searching for station_id {} found None\".format(id))\n return None",
"def get_share(id):\n from db import Share\n cp = Share.query.filter_by(id=id)\n if cp.count() > 0:\n return cp.first()\n return None",
"def getById (id):\r\n if id in thingsById:\r\n return thingsById[id]\r\n else:\r\n return None",
"def sportsteams_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=SPORTSTEAM_TYPE_URI,\n rdf_type_name=SPORTSTEAM_TYPE_NAME, \n kls=SportsTeam)",
"def FetchById( id ):\n\tresult = None\n\t\n\ttry:\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Trying to grab data from table using Id'))\n\t\tquery = \"SELECT * FROM shop WHERE id = %s;\"\n\t\tdb.cursor.execute( query, ( id, ) )\n\t\tresult = db.cursor.fetchone()\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Successfully grabbed data'))\n\t\t\n\texcept Error as e:\n\t\tLog.error(('SHOPS-Fetch-Id:', e))\n\t\tLog.info(('SHOPS-Fetch-Id:', query))\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Failed to grab data'))\n\treturn result",
"def fetch_pipeline_by_id(cls, id):\n return (\n Session.query(PipelineStore)\n .filter(PipelineStore.id == id)\n .one()\n )",
"def read(self, _id):\n basket = None\n connection_factory = factory.connection_factory(self.connection_factory_type)\n try:\n with connection_factory.get_connection() as client:\n _filter = {\"_id\": ObjectId(_id)}\n basket = client.farmers.basket.find_one(_filter)\n if basket:\n return basket\n self.logger.error(\"Could not find basket with id %s\", _id)\n except Exception as exception:\n self.logger.error(exception)\n return None",
"async def get_one(self, rule_id):\n return await self._rules.find_one({'id': rule_id}, {'_id': 0})",
"def get_wf_by_fw_id(self, dbname, fw_id):\n links_dict = self.client[dbname].workflows.find_one({'nodes': fw_id})\n if not links_dict:\n raise ValueError(\"Could not find a Workflow with fw_id: {}\".format(fw_id))\n fws = map(self.get_fw_by_id, links_dict[\"nodes\"])\n return Workflow(fws, links_dict['links'], links_dict['name'],\n links_dict['metadata'], links_dict['created_on'], links_dict['updated_on'])",
"def flow_display(\n flow_id: str = typer.Argument(...),\n output_format: FlowDisplayFormat = typer.Option(\n FlowDisplayFormat.json,\n \"--format\",\n \"-f\",\n help=\"Output display format.\",\n case_sensitive=False,\n show_default=True,\n ),\n flows_endpoint: str = typer.Option(\n PROD_FLOWS_BASE_URL,\n hidden=True,\n callback=flows_endpoint_envvar_callback,\n ),\n verbose: bool = verbosity_option,\n):\n fc = create_flows_client(CLIENT_ID, flows_endpoint)\n flow_get = fc.get_flow(flow_id)\n\n _format_and_display_flow(flow_get, output_format, verbose=verbose)",
"def test_read_flow_id_without_flow_id(self):\n\n self.assertEqual(Log4jParser()._read_flow_id(['not a flow ID'], None), None)",
"def find(self, id):\n\n result = [\n dessert for dessert in self.desserts if dessert.id == id]\n if not result:\n raise ValueError(f\"No dessert with an id of {id} exists.\")\n return result[0]",
"def read_by_id(_id):\n try:\n return Group.get(Group.id == _id)\n except Exception:\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns True if the view is in marked mode.
|
def get_marked(self) -> bool:
return self.show_marked
|
[
"def is_marked(markable):\n return bool(getattr(markable, _ATTR_DATA_MARKINGS, False))",
"def has_mark(self):",
"def is_marked(self,flag=None): \n if flag is None:\n # No flag -> check whether set is empty\n if self._flags:\n return True\n else:\n return False\n else:\n # Check wether given label is contained in quadcell's set\n return flag in self._flags",
"def is_flagged(self):\n return self._flagged",
"def isinview(self):\n term = getsession().terminal\n return (self.xloc > 0 and self.xloc +self.width -1 <= term.width\n and self.yloc > 0 and self.yloc +self.height -1 <= term.height)",
"def has_markers(self):\n return self.todolist_enabled or self.pyflakes_enabled\\\n or self.pep8_enabled",
"def is_highlighted(self):\n return self.highlighted",
"def in_view(self):\n \n bbox = self.bbox()\n area = self.parent.canvas.get_visible_area()\n\n y1, y2 = bbox[1], bbox[3]\n v1, v2 = area[1], area[3]\n\n return (y1 > v1 and y2 < v2)",
"def _get_isActive(self) -> \"bool\" :\n return _core.Document__get_isActive(self)",
"def isEditing(self):\n\n return self.group_obj and self.group_obj.id in self.highlights or False",
"def is_markable(m):\n if hasattr(m, _ATTR_DATA_MARKINGS):\n return True\n elif utils.is_entity(m):\n return True\n elif utils.is_sequence(m):\n return False\n else:\n return types.is_castable(m)",
"def isFolderViewActivated(self, context=None):\n if context is None:\n context = self.context\n layout = context.getLayout()\n if layout == \"folderview\":\n return True\n return False",
"def is_flagged(self, key: Key) -> bool:\n return self.get_rank(key) == Rank.FLAG",
"def has_tip(self):\n return self.tip_attached",
"def is_visible(self):\n return self.visible",
"def _get_isVisible(self) -> \"bool\" :\n return _core.Document__get_isVisible(self)",
"def mark(self):\n\n self.is_marked = True\n self.show()",
"def isLocateHighlighting(self) -> \"SbBool\":\n return _coin.SoTransformerManip_isLocateHighlighting(self)",
"def inViewEditor(visible=bool):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Toggle whether to show marked views only.
|
def toggle_marked(self) -> None:
self.show_marked = not self.show_marked
self._refilter()
|
[
"def toggle_view(self):\n self.emit(SIGNAL(\"toggle_view_div\"))",
"def get_marked(self) -> bool:\n return self.show_marked",
"def mark(self):\n\n self.is_marked = True\n self.show()",
"def toggle(self):",
"def toggle_unread_only(self):\n was_unread_only = self.unread_only\n self.action_show_unread_only.setChecked(not was_unread_only)\n self.action_show_all.setChecked(was_unread_only)",
"def toggle_visiblity(self, obj):\n self.dict_pref[obj][\"isVisible\"] = not self.dict_pref[obj][\"isVisible\"]",
"def visToggled(self, index, checkState):\n visible = 0 if checkState == QtCore.Qt.Unchecked else 1\n self._settings.updateSettingArray(\"structureVisibility\", index, visible)",
"def inViewEditor(visible=bool):\n pass",
"def toggle_finder(self, show):\n self.finder.set_visible(show)\n if not show:\n self.editor.setFocus()",
"def notebook_visible_toggle_action(self):\n\n self.notebook.Show(not self.notebook.IsShown())\n self.viewmenu.Check(406, self.notebook.IsShown())\n self.SendSizeEvent()",
"def toggle(self, *_):\n if self._expanded:\n self.collapse()\n else:\n self.expand()",
"def toggleNote(self):\n all_items = (self.graphicsView.scene.items())\n if not all_items:\n return\n for item in all_items:\n if isinstance(item, node.Node):\n if item.note:\n display = item.note.displayNote\n if display:\n item.note.displayNote = False\n else:\n item.note.displayNote = True\n self.graphicsView.scene.update()",
"def set_view_read_only(self):\n if self.reh is not None:\n self.reh.set_read_only()",
"def toggle_visibility(self):\n\n if self.actor.GetVisibility():\n self.actor.VisibilityOff()\n\n else:\n self.actor.VisibilityOn()",
"def rf_nodeEditorVis(self):\n self.mainUi.flNodeEditor.setVisible(self.mainUi.miNodeEditor.isChecked())\n self.setVisible(False)",
"def change_showing_ant(self):\n self.is_showing_ant = not self.is_showing_ant\n time.sleep(0.5)",
"def toggle_treeview(self):\n if not self.treeView.isHidden():\n self.buttons[\"show_structures_tree\"].setText(\n \"Show structures tree\"\n )\n else:\n self.buttons[\"show_structures_tree\"].setText(\n \"Hide structures tree\"\n )\n\n self.treeView.setHidden(not self.treeView.isHidden())",
"def toggle(self, id):\n a = self.objectmanager.objects.get(id=id)\n a.enabled = not a.enabled\n a.save()\n\n analytics.InlineAnalytics.analytics[a.name] = a\n\n return render({\"id\": id, \"status\": a.enabled})",
"def restrictLandmarksToViews(self):\n slicer.mrmlScene.StartState(slicer.mrmlScene.BatchProcessState)\n volumeNodes = self.currentVolumeNodes()\n if self.sliceNodesByViewName:\n landmarks = self.logic.landmarksForVolumes(volumeNodes)\n activeFiducialLists = []\n processedFiducialLists = []\n for landmarkName in landmarks:\n for fiducialList,index in landmarks[landmarkName]:\n if fiducialList in processedFiducialLists:\n continue\n processedFiducialLists.append(fiducialList)\n activeFiducialLists.append(fiducialList)\n displayNode = fiducialList.GetDisplayNode()\n displayNode.RemoveAllViewNodeIDs()\n volumeNodeID = fiducialList.GetAttribute(\"AssociatedNodeID\")\n if volumeNodeID:\n if self.sliceNodesByVolumeID.has_key(volumeNodeID):\n for sliceNode in self.sliceNodesByVolumeID[volumeNodeID]:\n displayNode.AddViewNodeID(sliceNode.GetID())\n for hiddenVolume in self.logic.hiddenFiducialVolumes:\n if hiddenVolume and volumeNodeID == hiddenVolume.GetID():\n displayNode.SetVisibility(False)\n allFiducialLists = slicer.util.getNodes('vtkMRMLMarkupsFiducialNode').values()\n for fiducialList in allFiducialLists:\n if fiducialList not in activeFiducialLists:\n displayNode = fiducialList.GetDisplayNode()\n if displayNode:\n displayNode.SetVisibility(False)\n displayNode.RemoveAllViewNodeIDs()\n displayNode.AddViewNodeID(\"__invalid_view_id__\")\n slicer.mrmlScene.EndState(slicer.mrmlScene.BatchProcessState)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Is this 0 <= index < len(self)?
|
def inbounds(self, index: int) -> bool:
return 0 <= index < len(self)
|
[
"def verify_index(self, index, a_list):\n return 0 <= index < len(a_list)",
"def indexists(list_input, index: int) -> bool:\r\n return index <= len(list_input) - 1",
"def contains_index(self, index):\n return self.point_def.index <= index <= self.point_def.array_last_index",
"def is_on_boundary(self, index):\n for i in range(3):\n if index[i] == 0 or index[i] == self.shape[i]-1:\n return True\n return False",
"def contains(self, index):\n \n return self.ind[0] <= index <= self.ind[1]",
"def is_on_last_item(self):\n return self.index == len(self) - 1",
"def has_next(self):\n\n return self.index < len(self.string)",
"def _check_legal_index(self, row, col):\n return 0 <= row and row < self._size and\\\n 0 <= col and col < self._size",
"def checkNext(self):\n if self.ptr >= len(self.mtd):\n return False\n #print( \"checkNext returning true\")\n return True",
"def check_undefined_self(slices, self_var):\n for i, slice in enumerate(slices):\n if check_one_undefined_slice_self(slice, self_var):\n return True, i\n return False, -1",
"def __contains__(self, cycle_index: object) -> bool:\n return self.lower <= cycle_index <= self.upper # type: ignore",
"def is_index_available(self, value: int, lst: list) -> bool:\n return value >= 0 and value < len(lst)",
"def __eq__(self, other):\n if self.index == other.index:\n return True\n return False",
"def __contains__(self, offset):\n if offset < 0:\n if self.first is not None:\n return False\n else:\n return self.last >= -offset\n elif self.first is None:\n return False\n elif self.last is None:\n return True\n else:\n return self.first <= offset <= self.last",
"def _indexes_valid(self):\n return self.input_index in range(self.num_inputs) and self.output_index in range(self.num_outputs)",
"def __gt__(self, other):\n return self._index > other._index or (self._index == other._index and self._ic > other._ic)",
"def __getitem__(self, index: int) -> Cell:\n\n if index[0] <= self.N and index[1] <= self.N:\n return self._safe_get(index)\n return None",
"def index(self):\n if self.parent:\n return self.parent.children.index(self)\n else:\n return 0",
"def is_unindexed(self, component):\n return component.index < 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates a list of flows. If a flow is not in the state, it is ignored.
|
def update(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:
for f in flows:
if f.id in self._store:
if self.filter(f):
if f not in self._view:
self._base_add(f)
if self.focus_follow:
self.focus.flow = f
self.sig_view_add.send(self, flow=f)
else:
# This is a tad complicated. The sortedcontainers
# implementation assumes that the order key is stable. If
# it changes mid-way Very Bad Things happen. We detect when
# this happens, and re-fresh the item.
self.order_key.refresh(f)
self.sig_view_update.send(self, flow=f)
else:
try:
idx = self._view.index(f)
except ValueError:
pass # The value was not in the view
else:
self._view.remove(f)
self.sig_view_remove.send(self, flow=f, index=idx)
|
[
"def _update_flows(self):\n ts = self.asbce.lastflows_timestamp\n fd = self.asbce.flows()\n for tag in (k for k, v in self.tracker.db.iteritems()\n if v.status in (\"ONCALL\", \"ONHOLD\")):\n try:\n d = {}\n for k in self.tracker.db[tag].flows[-1][-1].iterkeys():\n v = fd.get((k[0], k[1]), None)\n if v:\n d.update({k: v})\n elif self._is_call_dead(tag, ts):\n self.tracker.db[tag].status = \"LOST\"\n self.tracker.db[tag].endtime = datetime.now()\n self._remove_active_tcpdump(tag)\n if d:\n self.tracker.db[tag].flows[-1].append(Flows(d=d, timestamp=ts))\n except:\n pass\n self.refresh = True",
"def reset_flow_lists(self):\n # list of generated inter-arrival times, flow sizes, and data rates for the entire episode\n # dict: ingress_id --> list of arrival times, sizes, drs\n self.flow_arrival_list = []\n self.flow_size_list = []\n self.flow_dr_list = []\n self.flow_list_idx = 0\n self.last_arrival_sum = 0",
"def update_flows(self,t,new_flows):\n ti=self.t_sec_to_index(t)\n \n stride=4+self.n_exch*4\n flo_fn=self.get_path('flows-file')\n with open(flo_fn,'rb+') as fp:\n fp.seek(stride*ti)\n tstamp_data=fp.read(4)\n if len(tstamp_data)<4:\n self.log.info(\"update_flows: File is too short\")\n return False\n else:\n tstamp=np.frombuffer(tstamp_data,'i4')[0]\n if tstamp!=t:\n self.log.warning(\"update_flows: time stamp mismatch: %d != %d\"%(tstamp,t))\n fp.write(new_flows.astype('f4'))\n return True",
"def setStateList (self, states):\n\t\tself.state_list = states",
"def duplicate(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:\n dups = [f.copy() for f in flows]\n if dups:\n self.add(dups)\n self.focus.flow = dups[0]\n ctx.log.alert(\"Duplicated %s flows\" % len(dups))",
"def update_states_ii(self, states: List[State], action: Action, percepts: Percepts,\n filter_terminal: bool = False) -> List[State]:\n pass",
"def add_flow(self,flow):\n state = self.constructor.get_state(flow, self.get_id())\n if state:\n self.state += state\n return True\n else:\n return False",
"def remove(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:\n for f in flows:\n if f.id in self._store:\n if f.killable:\n f.kill()\n if f in self._view:\n # We manually pass the index here because multiple flows may have the same\n # sorting key, and we cannot reconstruct the index from that.\n idx = self._view.index(f)\n self._view.remove(f)\n self.sig_view_remove.send(self, flow=f, index=idx)\n del self._store[f.id]\n self.sig_store_remove.send(self, flow=f)\n if len(flows) > 1:\n ctx.log.alert(\"Removed %s flows\" % len(flows))",
"def flowmods_from_flows(flows):\n return [flow for flow in flows if isinstance(flow, valve_of.parser.OFPFlowMod)]",
"def send_flows_to_dp_by_id(self, valve, flows):\n flows = valve.prepare_send_flows(flows)\n self.last_flows_to_dp[valve.dp.dp_id] = flows",
"def add_ovs_flows():\n check_output(split(\"ovs-ofctl del-flows s1\"))\n\n check_output(\n split(\n 'ovs-ofctl add-flow s1 \"{proto},in_port={in_port},actions=output={out_port}\"'.format(\n **{\n \"in_port\": get_ofport(\"s1-client\"),\n \"out_port\": get_ofport(\"s1-vnf\"),\n \"proto\": \"udp\",\n }\n )\n )\n )\n check_output(\n split(\n 'ovs-ofctl add-flow s1 \"{proto},in_port={in_port},actions=output={out_port}\"'.format(\n **{\n \"in_port\": get_ofport(\"s1-server\"),\n \"out_port\": get_ofport(\"s1-client\"),\n \"proto\": \"udp\",\n }\n )\n )\n )",
"def setvalue(\n self,\n flows: typing.Sequence[mitmproxy.flow.Flow],\n key: str, value: str\n ) -> None:\n updated = []\n for f in flows:\n self.settings[f][key] = value\n updated.append(f)\n ctx.master.addons.trigger(\"update\", updated)",
"async def many_flow_run_states(flow, session, db):\n\n # clear all other flow runs\n await session.execute(sa.delete(db.FlowRun))\n await session.execute(sa.delete(db.FlowRunState))\n\n for _ in range(5):\n flow_run = await models.flow_runs.create_flow_run(\n session=session,\n flow_run=schemas.actions.FlowRunCreate(flow_id=flow.id, flow_version=1),\n )\n\n states = [\n db.FlowRunState(\n flow_run_id=flow_run.id,\n **schemas.states.State(\n type={\n 0: schemas.states.StateType.PENDING,\n 1: schemas.states.StateType.RUNNING,\n 2: schemas.states.StateType.COMPLETED,\n }[i],\n timestamp=pendulum.now(\"UTC\").add(minutes=i),\n ).orm_dict(),\n )\n for i in range(3)\n ]\n\n flow_run.set_state(states[-1])\n\n session.add_all(states)\n await session.commit()",
"def update_tap_flow(self, tap_flow, **attrs):\n return self._update(_tap_flow.TapFlow, tap_flow, **attrs)",
"def flow_arrangement_enforcer(self):\n blocks = self._topology.blocks\n log.debug(\"Enforcing Flow Arrangement\")\n\n maxBlockIdx = max([x for x in blocks])\n currentIdx = 0\n while currentIdx < maxBlockIdx:\n offsetIdx = 0\n #is the current block a destination? \n if not blocks[currentIdx].isFlowDest:\n #if it's not an origin, keep going.\n if not blocks[currentIdx].isFlowOrigin:\n pass\n #If it *is* an origin, what is its destination?\n else:\n destIdx = map(lambda x: x.dest.block.index, blocks[currentIdx].flowsGoingOut)\n if len(destIdx) > 1:\n pass\n #TODO\n else:\n destBlock = blocks[destIdx[0]]\n flowsGoingInToDestBlock = destBlock.flowsComingIn\n originsOfFlowsGoingInToDestBlock = map(lambda f: f.origin.block, flowsGoingInToDestBlock)\n for o in originsOfFlowsGoingInToDestBlock:\n #Don't move the one we're sitting on (or ones we've already processed)!\n if o.index > (currentIdx+offsetIdx):\n #Move each origin of the flows going into the dest block in front of it...\n offsetIdx += 1\n self.move_block(o.index, currentIdx+offsetIdx)\n #Double check that your dest block hasn't moved:\n offsetIdx += 1\n self.move_block(destBlock.index, currentIdx+offsetIdx)\n #If it *is* a destination, shunt it to the end and keep going.\n else:\n self.move_block(currentIdx, maxBlockIdx)\n currentIdx -= 1\n #Refresh current block indices\n blocks = self._topology.blocks\n currentIdx += (offsetIdx + 1)\n log.debug(\"Finished Enforcing Flow Arrangement\")\n blocks = self._topology.blocks",
"def update(self):\n transitions = self.__states[self.currentState]\n for (check, event, nextState) in transitions:\n if check():\n self.currentState = nextState\n print \"sm new state: \", nextState\n event()\n\n action = self.actions.get(self.currentState)\n if action is not None:\n action()",
"def provision_flows(sut, br_name, flows):\n for flow in flows:\n of_ver = ''\n if 'push_vlan' in flow:\n of_ver = '-O OpenFlow13'\n\n sut.vswitch.execute(f\"ovs-ofctl {of_ver} add-flow {br_name} {flow}\")\n\n sut.vswitch.execute(f\"ovs-ofctl dump-flows {br_name}\")",
"def flow_actions_list(\n flow_id: str = typer.Option(\n ...,\n help=\"The ID for the Flow which triggered the Action.\",\n prompt=True,\n ),\n flow_scope: str = typer.Option(\n None,\n help=\"The scope this Flow uses to authenticate requests.\",\n callback=url_validator_callback,\n ),\n roles: List[ActionRole] = typer.Option(\n None,\n \"--role\",\n help=\"Display Actions where you have the selected role. [repeatable]\",\n ),\n statuses: List[ActionStatus] = typer.Option(\n None,\n \"--status\",\n help=\"Display Actions with the selected status. [repeatable]\",\n ),\n marker: str = typer.Option(\n None,\n \"--marker\",\n \"-m\",\n help=\"A pagination token for iterating through returned data.\",\n ),\n per_page: int = typer.Option(\n None,\n \"--per-page\",\n \"-p\",\n help=\"The page size to return. Only valid when used without providing a marker.\",\n min=1,\n max=50,\n ),\n flows_endpoint: str = typer.Option(\n PROD_FLOWS_BASE_URL,\n hidden=True,\n callback=flows_endpoint_envvar_callback,\n ),\n verbose: bool = verbosity_option,\n):\n fc = create_flows_client(CLIENT_ID, flows_endpoint)\n\n # This None check and check makes me unhappy but is necessary for mypy to\n # be happy with the enums. If we can figure out what defaults flows uses\n # for flow role/status queries, we can set those here and be done\n statuses_str, roles_str = None, None\n if statuses is not None:\n statuses_str = [s.value for s in statuses]\n if roles is not None:\n roles_str = [r.value for r in roles]\n\n action_list = fc.list_flow_actions(\n flow_id,\n flow_scope,\n statuses=statuses_str,\n roles=roles_str,\n marker=marker,\n per_page=per_page,\n )\n format_and_echo(action_list, verbose=verbose)",
"def update(self):\n states = []\n for cell in self._cells:\n cell\n neighbors = self.get_neighbors(cell)\n state = self.transition_function(cell, neighbors)\n states.append(state)\n for i, cell in enumerate(self._cells):\n cell.update(states[i])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show the main page with all the posts. The index uses pagination, 5 posts per page. request: the request from the client. Returns the base.html page if the page exists, otherwise returns the 400 error page.
|
def index(request):
tmp_posts = Post.objects.order_by("-pub_date")
posts_page = Paginator(tmp_posts, 5)
# Default to page one when none is given
page = request.GET.get("page", 1)
try:
posts = posts_page.page(page)
except EmptyPage:
return render(
request,
"error.html",
{
"message": f"Could not find page: {page}",
"title_text": "Page not found - Post"
},
status=400
)
return render(
request,
"base.html",
{
"posts": posts,
}
)
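
A minimal sketch of how this view might be routed; the module layout and route name are assumptions for illustration, not taken from the snippet:

# urls.py (hypothetical)
from django.urls import path

from . import views

urlpatterns = [
    # "/?page=2" renders posts 6-10; an out-of-range page falls through to the 400 error page
    path("", views.index, name="index"),
]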
|
[
"def get(self):\n posts = Post.query()\n self.render('post-index.html',\n posts=posts,\n user=self.user)",
"def home(): #changed from redirect to 5 most recent posts. \n posts = Post.query.order_by(Post.created_at.desc()).limit(5).all() #pulls posts from DB, from all posts, and limits reponse to 5\n return render_template(\"posts/homepage.html\", posts=posts)",
"def index():\n form = PostForm()\n\n if form.validate_on_submit():\n # try to identify the language being used\n language = guess_language(form.post.data)\n if language == 'UNKNOWN' or len(language) > 5:\n language = ''\n post = Post(body=form.post.data, author=current_user, language=language)\n db.session.add(post)\n db.session.commit()\n flash(_l('Your post is now live!'))\n # Post/Redirect/Get pattern\n return redirect(url_for('main.index'))\n\n page = request.args.get('page', 1, type=int)\n posts = current_user.followed_posts().paginate(\n page, current_app.config['POSTS_PER_PAGE'], False)\n # posts.items is used to retrieve posts from the paginated object \n return render_template('index.html', title=_l('Home'), form=form, posts=posts.items)",
"def search(request):\n tmp_posts = Post.objects.order_by(\"-pub_date\").filter(header__contains=request.GET[\"header\"])\n posts_page = Paginator(tmp_posts, 5)\n # Default to page one when none is given\n page = request.GET.get(\"page\", 1)\n try:\n posts = posts_page.page(page)\n except EmptyPage:\n return render(\n request,\n \"error.html\",\n {\n \"message\": f\"Could not find page: {page}\",\n \"title_text\": \"Page not found - Post\"\n },\n status=400\n )\n\n # Easy the logic on the template file by giving a header\n # parameter. This should be used on the pagination, concatennating\n # a url (the same as this view was called is preferable...)\n header = f\";header={request.GET['header']}\"\n return render(request, \"base.html\", {\"posts\": posts, \"header\": header})",
"def basic_pages():\n return make_response(open('src/templates/index.html').read())",
"def articles(request):\n article_list = Article.objects.all().order_by(\"-date_created\")\n paginator = Paginator(article_list, 5)\n page = request.GET.get(\"page\")\n\n try:\n article_list = paginator.page(page)\n except PageNotAnInteger:\n article_list = paginator.page(1)\n except EmptyPage:\n article_list = paginator.page(paginator.num_pages)\n\n context = {\n \"articles\": article_list,\n }\n return render(request, \"mhubblog_app/articles_list.html\", context)",
"def blog_index(request):\n posts = Post.objects.all().order_by('-created_on')\n context = {\n \"posts\": posts,\n }\n return render(request, \"blog_index.html\", context)",
"def blog_index(request):\n posts = Post.objects.all().order_by('-created_on')\n context = {\n \"posts\": posts,\n }\n return render(request, \"blog/blog_index.html\", context)",
"def get(self, posts=\"\"):\n posts = list(Post.get_all())\n\n self.render(\"blog.html\", user=self.user, posts=posts)",
"def view_paged(request):\n \n # intialize results in case of failure...\n results_list, results_slice = ([], [])\n \n # get query\n query = responses.get_request_arg(request, ['q', 'query', 'search'], default=\"\")\n query_safe = mark_for_escaping(query)\n \n # check query\n search_warning = searchtools.valid_query(query)\n\n # search okay?\n if search_warning == '':\n # get initial results\n results_list = searchtools.search_all(query, projects_first=True)\n \n # get overall total count\n results_count = len(results_list)\n \n # get args\n page_args = responses.get_paged_args(request, results_count)\n # results slice\n if results_count > 0:\n results_slice = utilities.slice_list(results_list,\n starting_index=page_args['start_id'],\n max_items=page_args['max_items'])\n else:\n results_slice = []\n \n # get last index.\n end_id = str(page_args['start_id'] + len(results_slice))\n return responses.clean_response(\"searcher/results_paged.html\",\n {\"request\": request,\n \"search_warning\": search_warning,\n \"results_list\": results_slice,\n \"query_text\": query,\n \"query_safe\": query_safe,\n \"start_id\": (page_args['start_id'] + 1),\n \"end_id\": end_id,\n \"results_count\": results_count,\n \"prev_page\": page_args['prev_page'],\n \"next_page\": page_args['next_page'],\n \"has_prev\": (page_args['start_id'] > 0),\n \"has_next\": (page_args['start_id'] < (results_count - page_args['max_items'])),\n \"extra_style_link_list\": [utilities.get_browser_style(request),\n \"/static/css/searcher.min.css\",\n \"/static/css/highlighter.min.css\"],\n })",
"def main_page():\n return render_template('base.html', title='Главная страница', news=\n get_news(0, 10), templates=[\"news.html\", \"news_scroll.html\"])",
"def notebooks_index():\n return render_template('blog/index.html', posts=[])",
"def index(request):\n\n model3d_list = Model3d.objects.all()\n paginator = Paginator(model3d_list, 5)\n\n page = request.GET.get('page')\n try:\n model3ds = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n model3ds = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n model3ds = paginator.page(paginator.num_pages)\n\n return render(request, 'sketchfab/index.html', context={'model3ds': model3ds})",
"def main_page():\n url = request.args.get(\"url\", None)\n headline = request.args.get(\"headline\", None)\n\n if headline == None:\n if 'facebook' not in tldextract.extract(url).domain.lower():\n request_response = requests.get(url, allow_redirects=True)\n soup = BeautifulSoup(request_response.text, \"lxml\")\n headline = str(soup.title.string)\n\n percentage, similar_found = run_model(headline)\n\n if similar_found == None:\n response = app.response_class(\n response=json.dumps({\"headline\": headline, \"percentage\": percentage}),\n status=200,\n mimetype=\"application/json\"\n )\n else:\n response = app.response_class(\n response=json.dumps({\"headline\": headline, \"percentage\": percentage, \"similarArticles\": similar_found}),\n status=200,\n mimetype=\"application/json\"\n )\n return response",
"def get(self):\n logger.info('get main page')\n return self.render(os.path.join(TEMPLATE_DIR, 'index.html'),\n static='', base_url=config.BASE_URL, notebook=config.NOTEBOOK,\n token='none')",
"def view_index(context, request):\n entries = (e for e in context.__parent__ if isinstance(e, BlogEntry))\n sorted_entries = sorted(entries, key=lambda x: x.published_date)\n content = context.render_as_template(entries=sorted_entries)\n return Response(body=content)",
"def search_page():\n queryString = ''\n if request.query_string is not None:\n queryString = request.query_string.decode('utf-8')\n ready4work = settings.ready_for_work\n if settings.ready_for_work:\n ready4work = sc.is_alive()\n\n return render_template('index.html',\n ready_for_work=ready4work,\n locale=get_locale(),\n corpus_name=settings.corpus_name,\n languages=settings.languages,\n all_lang_search=settings.all_language_search_enabled,\n transliterations=settings.transliterations,\n input_methods=settings.input_methods,\n keyboards_by_tier=json.dumps(settings.keyboards,\n ensure_ascii=False, indent=-1),\n media=settings.media,\n video=settings.video,\n images=settings.images,\n youtube=settings.media_youtube,\n gloss_search_enabled=settings.gloss_search_enabled,\n negative_search_enabled=settings.negative_search_enabled,\n fulltext_search_enabled=settings.fulltext_search_enabled,\n year_sort_enabled=settings.year_sort_enabled,\n debug=settings.debug,\n subcorpus_selection=settings.search_meta,\n sentence_meta=settings.sentence_meta,\n word_fields_by_tier=json.dumps(settings.word_fields_by_tier,\n ensure_ascii=False, indent=-1),\n auto_switch_tiers=json.dumps(settings.auto_switch_tiers,\n ensure_ascii=False, indent=-1),\n generate_dictionary=settings.generate_dictionary,\n citation=settings.citation,\n start_page_url=settings.start_page_url,\n default_view=settings.default_view,\n max_request_time=settings.query_timeout + 1,\n max_page_size=MAX_PAGE_SIZE,\n locales=settings.interface_languages,\n random_seed=get_session_data('seed'),\n query_string=queryString)",
"def wiki_index(request):\n pages = WikiPage.objects.all()\n \n return render_to_response(\"doc_wiki/index.html\", {\n \"pages\": pages,\n }, context_instance=RequestContext(request))",
"def index():\n title = \"Home- Welcome to the News Highlights Website\"\n \n # Getting the news sources\n news_sources = get_sources('general')\n return render_template('index.html', title=title, sources=news_sources)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Search for posts according to what was given. The search matches posts whose header contains some or all of the given text. The search uses pagination, 5 posts per page. If the given page parameter raises an EmptyPage exception, an error page is returned. request: the request from the client. Returns the base.html page if the page exists; otherwise returns a 400 error page.
|
def search(request):
tmp_posts = Post.objects.order_by("-pub_date").filter(header__contains=request.GET["header"])
posts_page = Paginator(tmp_posts, 5)
# Default to page one when none is given
page = request.GET.get("page", 1)
try:
posts = posts_page.page(page)
except EmptyPage:
return render(
request,
"error.html",
{
"message": f"Could not find page: {page}",
"title_text": "Page not found - Post"
},
status=400
)
    # Ease the logic on the template file by giving a header
    # parameter. This should be used on the pagination, concatenating
    # a url (the same url this view was called with is preferable...)
header = f";header={request.GET['header']}"
return render(request, "base.html", {"posts": posts, "header": header})
|
[
"def search():\n if not g.search_form.validate():\n return redirect(url_for('main.explore'))\n \n page = request.args.get('page', 1, type=int)\n posts, total = Post.search(g.search_form.q.data, page, \n current_app.config['POSTS_PER_PAGE'])\n \n next_url = url_for('main.search', q=g.search_form.q.data, page=page + 1) \\\n if total > page * current_app.config['POSTS_PER_PAGE'] else None\n \n prev_url = url_for('main.search', q=g.search_form.q.data, page=page - 1) \\\n if page > 1 else None\n \n return render_template('search.html', title=_l('Search'), posts=posts,\n next_url=next_url, prev_url=prev_url)",
"def view_paged(request):\n \n # intialize results in case of failure...\n results_list, results_slice = ([], [])\n \n # get query\n query = responses.get_request_arg(request, ['q', 'query', 'search'], default=\"\")\n query_safe = mark_for_escaping(query)\n \n # check query\n search_warning = searchtools.valid_query(query)\n\n # search okay?\n if search_warning == '':\n # get initial results\n results_list = searchtools.search_all(query, projects_first=True)\n \n # get overall total count\n results_count = len(results_list)\n \n # get args\n page_args = responses.get_paged_args(request, results_count)\n # results slice\n if results_count > 0:\n results_slice = utilities.slice_list(results_list,\n starting_index=page_args['start_id'],\n max_items=page_args['max_items'])\n else:\n results_slice = []\n \n # get last index.\n end_id = str(page_args['start_id'] + len(results_slice))\n return responses.clean_response(\"searcher/results_paged.html\",\n {\"request\": request,\n \"search_warning\": search_warning,\n \"results_list\": results_slice,\n \"query_text\": query,\n \"query_safe\": query_safe,\n \"start_id\": (page_args['start_id'] + 1),\n \"end_id\": end_id,\n \"results_count\": results_count,\n \"prev_page\": page_args['prev_page'],\n \"next_page\": page_args['next_page'],\n \"has_prev\": (page_args['start_id'] > 0),\n \"has_next\": (page_args['start_id'] < (results_count - page_args['max_items'])),\n \"extra_style_link_list\": [utilities.get_browser_style(request),\n \"/static/css/searcher.min.css\",\n \"/static/css/highlighter.min.css\"],\n })",
"def index(request):\n tmp_posts = Post.objects.order_by(\"-pub_date\")\n posts_page = Paginator(tmp_posts, 5)\n # Default to page one when none is given\n page = request.GET.get(\"page\", 1)\n try:\n posts = posts_page.page(page)\n except EmptyPage:\n return render(\n request,\n \"error.html\",\n {\n \"message\": f\"Could not find page: {page}\",\n \"title_text\": \"Page not found - Post\"\n },\n status=400\n )\n \n return render(\n request,\n \"base.html\", \n {\n \"posts\": posts,\n }\n )",
"def search():\n hashtag = request.args.get('ht')\n if hashtag is None or not check_hashtag(hashtag):\n return make_response(render_template(\"search.html\", title=APP_NAME,\n file=\"error.html\", message=ERROR_INVALID))\n if hashtag[0] == \"#\":\n hashtag = hashtag[1:]\n if not start_search_hashtag(hashtag):\n return make_response(render_template(\"search.html\", title=APP_NAME,\n file=\"error.html\", message=ERROR_SERVER))\n return make_response(render_template(\"search.html\", title=APP_NAME,\n file=\"loading.html\", hashtag=hashtag))",
"def search():\n \n if 'q' in request.args:\n id = request.args['q']\n else:\n return \"<h1>400</h1> <p>No query field provided. Please specify an query.</p>\", 400\n\n if 'p' in request.args:\n page = int(request.args['p'])\n else:\n return \"<h1>400</h1><p> No page field provided. Please specify a page.</p>\", 400\n\n if 'year' in request.args and 'dtype' in request.args:\n year = request.args['year'].split(',')\n dtype = request.args['dtype']\n body = {\"query\":{\"bool\":{\"must\":[{\"multi_match\": {\"query\": id, \"fields\": [\"tag\", \"cardHtml\"]}},{\"terms\": {\"year\": year}}]}}}\n res = es.search(index= str(dtype), from_ = (int(page)*20), size = 20, doc_type=\"cards\", track_total_hits = True, body = body)\n \n elif 'year' in request.args:\n year = request.args['year'].split(',')\n body = {\"query\":{\"bool\":{\"must\":[{\"multi_match\": {\"query\": id, \"fields\": [\"tag\", \"cardHtml\"]}},{\"terms\": {\"year\": year}}]}}}\n res = es.search(index= \"_all\", from_ = (int(page)*20), size = 20, doc_type=\"cards\", track_total_hits = True, body=body)\n \n elif 'dtype' in request.args:\n dtype = request.args['dtype']\n res = es.search(index= str(dtype), doc_type=\"cards\", from_ = (int(page)*20), track_total_hits = True, size = 20, body={\"query\": {\"multi_match\": {\"query\": id, \"fields\": [ \"tag\", \"cardHtml\" ]}}})\n else:\n res = es.search(index= \"_all\", doc_type=\"cards\", from_ = (int(page)*20), track_total_hits = True, size = 20, body={\"query\": {\"multi_match\": {\"query\": id, \"fields\": [ \"tag\", \"cardHtml\" ]}}})\n \n tags = []\n results = {}\n i=0\n\n for doc in res['hits']['hits']:\n if doc['_source']['tag'] not in tags:\n tags.append(doc['_source']['cardHtml'])\n results['_source' + str(i)] = ('_id: ' + doc['_id'], doc['_source'], 'dtype: ' + doc['_index'])\n i+=1\n else:\n es.delete_by_query(index=\"_all\", doc_type=\"cards\", wait_for_completion = False, body={\"query\": {\"match_phrase\": {\"_id\": doc['_id']}}})\n \n return results",
"def search(self, params={}):\n params['limit'] = self.single_page_limit\n h_url = self.query_url.format(query=urlencode(params))\n #print h_url\n json = requests.get(h_url).json()\n return json",
"def search_page():\n queryString = ''\n if request.query_string is not None:\n queryString = request.query_string.decode('utf-8')\n ready4work = settings.ready_for_work\n if settings.ready_for_work:\n ready4work = sc.is_alive()\n\n return render_template('index.html',\n ready_for_work=ready4work,\n locale=get_locale(),\n corpus_name=settings.corpus_name,\n languages=settings.languages,\n all_lang_search=settings.all_language_search_enabled,\n transliterations=settings.transliterations,\n input_methods=settings.input_methods,\n keyboards_by_tier=json.dumps(settings.keyboards,\n ensure_ascii=False, indent=-1),\n media=settings.media,\n video=settings.video,\n images=settings.images,\n youtube=settings.media_youtube,\n gloss_search_enabled=settings.gloss_search_enabled,\n negative_search_enabled=settings.negative_search_enabled,\n fulltext_search_enabled=settings.fulltext_search_enabled,\n year_sort_enabled=settings.year_sort_enabled,\n debug=settings.debug,\n subcorpus_selection=settings.search_meta,\n sentence_meta=settings.sentence_meta,\n word_fields_by_tier=json.dumps(settings.word_fields_by_tier,\n ensure_ascii=False, indent=-1),\n auto_switch_tiers=json.dumps(settings.auto_switch_tiers,\n ensure_ascii=False, indent=-1),\n generate_dictionary=settings.generate_dictionary,\n citation=settings.citation,\n start_page_url=settings.start_page_url,\n default_view=settings.default_view,\n max_request_time=settings.query_timeout + 1,\n max_page_size=MAX_PAGE_SIZE,\n locales=settings.interface_languages,\n random_seed=get_session_data('seed'),\n query_string=queryString)",
"def do_search(request):\n comics_list = Comic.objects.all()\n query = request.GET.get('q')\n if query:\n comics_list = Comic.objects.filter(\n Q(name__icontains=query) | Q(grade__icontains=query) |\n Q(brand__icontains=query)\n ).distinct()\n paginator = Paginator(comics_list, 4)\n\n page = request.GET.get('page')\n try:\n comics = paginator.page(page)\n except PageNotAnInteger:\n\n comics = paginator.page(1)\n except EmptyPage:\n\n comics = paginator.page(paginator.num_pages)\n return render(request, \"search.html\", {\"comics\": comics})",
"def search_results(request):\n search_query = request.GET.get('search_query', '')\n location = request.GET.get('location', '')\n category = request.GET.get('category', '')\n\n if search_query or location or category:\n all_results = Book.objects.filter(\n title__icontains=search_query,\n location__icontains=location,\n category__icontains=category\n )\n\n paginator = Paginator(all_results, 10)\n page = request.GET.get('page')\n if paginator.num_pages > 1:\n p = True\n else:\n p = False\n try:\n results = paginator.page(page)\n\n except PageNotAnInteger:\n results = paginator.page(1)\n\n except EmptyPage:\n results = paginator.page(paginator.num_pages)\n\n page_obj = results\n\n show_search_box = True\n GOOGLE_API_KEY = settings.GOOGLE_API_KEY\n\n return render(request, 'books/search_results.html', {\n 'results': results,\n 'show_search_box': show_search_box,\n 'GOOGLE_API_KEY': GOOGLE_API_KEY,\n 'p': p,\n 'page': page,\n 'page_obj': page_obj\n })\n else:\n return redirect('homepage')",
"def get_posts_(search_request):\n if search_request['key_words'] == [\"\"]:\n search_request['key_words'] = []\n\n if ('login' not in search_request) | ('password' not in search_request):\n search_request['login'] = FB_LOGIN\n search_request['password'] = FB_PASSWORD\n\n browser = fbb(search_request['login'], search_request['password'])\n\n posts = []\n for page_id in search_request['ids']:\n posts += browser.get_posts(page_id, 100, search_request['from_date'],\n search_request['is_need_comments'])\n\n browser.close()\n browser.quit()\n posts = [post.to_dict() for post in posts if\n get_post_relevance(post.message, search_request) > 0]\n\n if search_request['from_date'] is not None:\n posts = [post for post in posts if post['date'] >= search_request['from_date']]\n if search_request['to_date'] is not None:\n posts = [post for post in posts if post['date'] <= search_request['to_date']]\n\n if search_request['is_need_comments'] == 0:\n for post in posts:\n post['comments'] = []\n\n return posts",
"def test_post_search_pagination(self):\n post_collection = [generate_post_form_dto() for _ in range(10)]\n for post in post_collection:\n self.simulate_post(\n PostCollectionResource.route,\n body=to_json(PostFormDtoSerializer, post),\n headers=self.headers)\n search_settings = PostSearchSettingsDto(\n query=self.user.username,\n options=[PostSearchOptions.AUTHOR])\n post_search_res = self.simulate_post(\n PostSearchResource.route,\n body=to_json(PostSearchSettingsDtoSerializer, search_settings),\n headers=self.headers,\n params={\n 'start': 5,\n 'count': 5\n })\n self.assertEqual(post_search_res.status_code, 201)\n posts = post_search_res.json.get('posts')\n self.assertEqual(len(posts), 5)\n for res, post in zip(posts, post_collection[5:]):\n self.assertEqual(res['title'], post.title)\n self.assertEqual(res['description'], post.description)\n self.assertEqual(res['content'], post.content)\n self.assertEqual(res['private'], post.private)\n self.assertEqual(res['featured'], post.featured)\n self.assertEqual(len(res['tags']), len(post.tags))\n for expected, found in zip(res['tags'], post.tags):\n self.assertEqual(expected, found)",
"def blog(request):\n blogposts = BlogPost.objects.all()\n query = None\n\n # Handling the functionality of searching for a blogpost,\n # if the user has made a search request\n if 'query' in request.GET:\n query = request.GET['query']\n\n # Handling if user clicks search button without\n # entering search criteria\n if not query:\n messages.error(request, \"No search qriteria entered\")\n return redirect(reverse('blogposts'))\n # Taking in the search criteria and filtering by them\n queries = Q(\n title__icontains=query) | Q(content__icontains=query)\n blogposts = blogposts.filter(queries)\n\n context = {\n 'blogposts': blogposts,\n 'search_term': query,\n }\n\n return render(request, 'blog/blog.html', context)",
"def searchPage():\n form = SearchForm(request.form)\n logger.info(\"Rendering SearchPage.html and sending to: %s\", request.remote_addr)\n return render_template('SearchPage.html', form=form)",
"def request(self, n_pages=100, n_per_page=50):\n\n # NOTE: This main search query has a type, but the query string also has a type.\n # ref (\"search\"): https://developer.github.com/v4/query/#connections\n # Collect paginated issues\n self.issues_and_or_prs = []\n for ii in range(n_pages):\n github_search_query = [\n 'first: %s' % n_per_page,\n 'query: \"%s\"' % self.query,\n 'type: ISSUE',\n ]\n if ii != 0:\n github_search_query.append('after: \"%s\"' % pageInfo['endCursor'])\n\n ii_gql_query = self.gql_template.format(\n query=', '.join(github_search_query),\n comments=comments_query,\n base_elements=base_elements,\n )\n ii_request = requests.post('https://api.github.com/graphql', json={'query': ii_gql_query}, headers=self.headers)\n if ii_request.status_code != 200:\n raise Exception(\"Query failed to run by returning code of {}. {}\".format(ii_request.status_code, ii_gql_query))\n if \"errors\" in ii_request.json().keys():\n raise Exception(\"Query failed to run with error {}. {}\".format(ii_request.json()['errors'], ii_gql_query))\n self.last_request = ii_request\n\n # Parse the response for this pagination\n json = ii_request.json()['data']['search']\n if ii == 0:\n if json['issueCount'] == 0:\n print(\"Found no entries for query.\")\n self.data = pd.DataFrame()\n return\n\n n_pages = int(np.ceil(json['issueCount'] / n_per_page))\n print(\"Found {} items, which will take {} pages\".format(json['issueCount'], n_pages))\n prog = widgets.IntProgress(\n value=0,\n min=0,\n max=n_pages,\n description='Downloading:',\n bar_style='',\n )\n if n_pages > 1 and self.display_progress:\n display(prog)\n\n # Add the JSON to the raw data list\n self.issues_and_or_prs.extend(json['nodes'])\n pageInfo = json['pageInfo']\n self.last_query = ii_gql_query\n\n # Update progress and should we stop?\n prog.value += 1\n if pageInfo['hasNextPage'] is False:\n prog.bar_style = 'success'\n break\n\n # Create a dataframe of the issues and/or PRs\n self.data = pd.DataFrame(self.issues_and_or_prs)\n\n # Add some extra fields\n self.data['author'] = self.data['author'].map(lambda a: a['login'] if a is not None else a)\n self.data['org'] = self.data['url'].map(lambda a: a.split('/')[3])\n self.data['repo'] = self.data['url'].map(lambda a: a.split('/')[4])",
"def filter_blog_content_by_search(self, query, page, per_page_number, context=\"title\"):\n if context == 'title':\n FoodBlogs = self.filter_model_content(title=query)\n else:\n FoodBlogs = self.filter_model_content()\n\n paginator = Paginator(FoodBlogs, per_page_number)\n blogs = ''\n try:\n blogs = paginator.get_page(page)\n except PageNotAnInteger:\n blogs = paginator.get_page(1)\n except EmptyPage:\n blogs = ''\n finally:\n return blogs",
"def log_source_search(request):\n\n try:\n return 200, search_log_source_by_keyword(request.body)\n except ValueError as e:\n return 404, {'error': repr(e)}\n except HTTPError as e:\n return 404, {'error': repr(e)}\n except Exception as unknown_exception:\n return 500, {'error': repr(unknown_exception)}",
"def test_search_posts_no_query(self):\n response = self.client.get('/blog/search/?blog_q=')\n self.assertEqual(response.url, \"/blog/\")\n context = response.context\n self.assertFalse(context)",
"def search():\n #get the name given\n name = request.args.get('q')\n #get the given page and number of events or set them to default\n page = request.args.get(\"page\", default=1, type=int)\n per_page = request.args.get(\"limit\", default=15, type=int)\n if name:\n found_events = Events.get_events_by_name(name, page, per_page)\n if found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more events matching the given name\"}), 404\n return jsonify({\"message\" : \"can not search events, provide event name\"}), 400",
"def test_search_page_exists(self):\n\n #get_request = self.rf.get(\"/search/\")\n response = self.c.get(\"/search/\")\n self.assertEqual(response.status_code, 200)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show the post content when the request is a GET. If a POST is given, it will try to save a comment for the post, according to the values passed. request: the request from the client. str: the post header. The return will always be the template file "detail.html".
|
def show_post(request, str):
# It's guaranteed to always have a unique header.
post = get_object_or_404(Post, header=str)
comments = post.comment_set.order_by("-comment_date")
# Save a comment to given post.
if (request.method == "POST"):
# Because we add a few things at the comment creation,
# we pass it with the data argument.
comment_form = CommentForm(data={
"post": post,
"comment_date": datetime.now(),
"author": request.user,
"content": request.POST["content"],
"reply": request.POST["reply"],
})
if (comment_form.is_valid()):
comment_form.save()
comment_form = CommentForm()
else :
comment_form = CommentForm()
return render(request, "detail.html", {
"post": post,
"comment_form": comment_form,
"comments": comments
})
|
[
"def get(self, request, pk, *args, **kwargs):\n post = Post.objects.get(pk=pk)\n form = CommentForm()\n comments = Comment.objects.filter(post=post).order_by('-created_on')\n\n context = {\n 'post': post,\n 'form': form,\n 'comments': comments,\n }\n\n return render(request, 'social/post_detail.html', context)",
"def detail(request, slug):\n submission = get_object_or_404(Submission.admin_manager, slug=slug)\n if submission.censored and submission.censored_url:\n return HttpResponseRedirect(submission.censored_url)\n if not submission.allows_viewing_by(request.user):\n return HttpResponseForbidden(_('access denied') + '')\n\n last_new_comment_id = request.session.get(DEMOS_LAST_NEW_COMMENT_ID, None)\n if last_new_comment_id:\n del request.session[DEMOS_LAST_NEW_COMMENT_ID]\n\n more_by = Submission.objects.filter(creator=submission.creator)\\\n .exclude(hidden=True)\\\n .order_by('-modified').all()[:5]\n\n return jingo.render(request, 'demos/detail.html', {\n 'submission': submission,\n 'last_new_comment_id': last_new_comment_id,\n 'more_by': more_by\n })",
"def blog_post_detail(request, slug, template=\"blog/blog_post_detail.html\"):\n blog_posts = BlogPost.objects.published(for_user=request.user)\n blog_post = get_object_or_404(blog_posts, slug=slug)\n # Handle comments\n comment_parts = handle_comments(blog_post, request)\n posted_comment_form, unposted_comment_form, response = comment_parts\n if response is not None:\n return response\n context = {\"blog_page\": blog_page(), \"blog_post\": blog_post,\n \"posted_comment_form\": posted_comment_form,\n \"unposted_comment_form\": unposted_comment_form}\n templates = [\"blog/blog_post_detail_%s.html\" % slug, template]\n request_context = RequestContext(request, context)\n t = select_template(templates, request_context)\n return HttpResponse(t.render(request_context))",
"def requestview(request):\n return render(request, \"request.html\")",
"def get(self):\n return render_template('blog/create.html', form=PostForm())",
"def get(self):\n self.render('post-new.html', user=self.user)",
"def render_new_request_page():\n title = 'New Request'\n return render_template('new_request.html', page_title=title)",
"def get_content(post, request=None):\n return blogtools.get_post_body(post)",
"def post(self):\r\n title = self.request.get(\"subject\")\r\n content = self.request.get(\"content\")\r\n if title and content:\r\n add_to_store = BlogPosts(title = title, blogpost = content)\r\n newpost = add_to_store.put()\r\n self.redirect(\"/blog/\" + str(newpost.id()))\r\n else:\r\n self.render(\"/newpost.html\", title = title, post = content, error = \"Title and Content Please\")",
"def post(self, *args, **kwargs):\n\n return self.render()",
"def show_comment_form(self, req, page):\n page_id = self.env.get_real_filename(page)[:-4]\n ajax_mode = req.args.get('mode') == 'ajax'\n target = req.args.get('target')\n page_comment_mode = not target\n\n form_error = preview = None\n title = req.form.get('title', '').strip()\n if 'author' in req.form:\n author = req.form['author']\n else:\n author = req.session.get('author', '')\n if 'author_mail' in req.form:\n author_mail = req.form['author_mail']\n else:\n author_mail = req.session.get('author_mail', '')\n comment_body = req.form.get('comment_body', '')\n fields = (title, author, author_mail, comment_body)\n\n if req.method == 'POST':\n if req.form.get('preview'):\n preview = Comment(page_id, target, title, author, author_mail,\n comment_body)\n # 'homepage' is a forbidden field to thwart bots\n elif req.form.get('homepage') or self.antispam.is_spam(fields):\n form_error = 'Your text contains blocked URLs or words.'\n else:\n if not all(fields):\n form_error = 'You have to fill out all fields.'\n elif _mail_re.search(author_mail) is None:\n form_error = 'You have to provide a valid e-mail address.'\n elif len(comment_body) < 20:\n form_error = 'You comment is too short ' \\\n '(must have at least 20 characters).'\n else:\n # '|none' can stay since it doesn't include comments\n self.cache.pop(page_id + '|inline', None)\n self.cache.pop(page_id + '|bottom', None)\n comment = Comment(page_id, target,\n title, author, author_mail,\n comment_body)\n comment.save()\n req.session['author'] = author\n req.session['author_mail'] = author_mail\n if ajax_mode:\n return JSONResponse({'posted': True, 'error': False,\n 'commentID': comment.comment_id})\n return RedirectResponse(comment.url)\n\n output = render_template(req, '_commentform.html', {\n 'ajax_mode': ajax_mode,\n 'preview': preview,\n 'suggest_url': '@edit/%s/' % page,\n 'comments_form': {\n 'target': target,\n 'title': title,\n 'author': author,\n 'author_mail': author_mail,\n 'comment_body': comment_body,\n 'error': form_error\n }\n })\n\n if ajax_mode:\n return JSONResponse({\n 'body': output,\n 'error': bool(form_error),\n 'posted': False\n })\n return Response(render_template(req, 'commentform.html', {\n 'form': output\n }))",
"def comment_data_to_save(self, request, id: str) -> str:\n data = dict()\n data[\"author\"] = request.form['author']\n data[\"text\"] = request.form['text']\n data[\"post\"] = id \n return str(data)",
"def restaurant_post(request, post):\n\tpost = Post.objects.get(pk=post)\n\n\tt = post.time.astimezone(pytz.timezone(\"America/New_York\"))\n\ttime = datetime.datetime.strftime(t, \"%d.%m.%y %H:%M\")\n\n\tcomments = Comment.objects.filter(post=post)\n\tcontext = {\"post\": post, \"time\": time, \"comments\": comments}\n\treturn render(request, \"foodblog/restaurant_post.html\", context)",
"def blog_detail(request, pk):\n post = Post.objects.get(pk=pk)\n context = {\n \"post\": post,\n }\n\n return render(request, \"blog/blog_detail.html\", context)",
"def single_document_details(request, id):\n document = Document.objects.get(id=id)\n return render(request, 'html/detail.html', {'document': document})",
"def get_entry(request, title):\n item_of_interest = util.get_entry(title)\n markdowner = Markdown()\n new = markdowner.convert(item_of_interest)\n return render(request, \"encyclopedia/get_entry.html\", {\n \"entry\": new,\n \"title\": title.capitalize(),\n })",
"def _new_blog_post(self, req):\n action = req.args.get('action', 'edit')\n pg_name_fmt = self.env.config.get('blog', 'page_format', \n '%Y/%m/%d/%H.%M')\n wikitext = req.args.get('text', '')\n blogtitle = req.args.get('blogtitle', '')\n pagename = req.args.get('pagename', pg_name_fmt) \n pagename = time.strftime(pagename)\n if '%@' in pagename and blogtitle: \n urltitle = re.sub(r'[^\\w]+', '-', blogtitle).lower() \n pagename = pagename.replace('%@', urltitle) \n while '-' in pagename and len(pagename) > 60: \n pagename = '-'.join(pagename.split('-')[:-1]) \n pagename = pagename.strip('-')\n if '$U' in pagename:\n pagename = pagename.replace('$U', req.authname)\n comment = req.args.get('comment', '')\n readonly = int(req.args.has_key('readonly'))\n edit_rows = int(req.args.get('edite_rows', 20))\n req_tags = req.args.get('tags', [])\n \n if req.method == 'POST':\n if action == 'edit':\n if req.args.has_key('cancel'):\n req.redirect(self.env.href.blog())\n page = WikiPage(self.env, pagename, None)\n tags = TagEngine(self.env).tagspace.wiki\n if req.args.has_key('preview'):\n req.hdf['blog.action'] = 'preview'\n self._render_editor(req, page, self.env.get_db_cnx(),\n preview=True) \n else:\n titleline = ' '.join([\"=\", blogtitle, \"=\\n\"])\n if blogtitle:\n page.text = ''.join([titleline, wikitext])\n else:\n page.text = wikitext\n page.readonly = readonly\n page.save(req.authname, comment, req.remote_addr)\n# taglist = [x.strip() for x in req_tags.split(',') if x]\n taglist = [t.strip() for t in \n _tag_split.split(req.args.get('tags')) \n if t.strip()]\n tags.add_tags(req, pagename, taglist)\n req.redirect(self.env.href.blog())\n else:\n info = {\n 'title' : blogtitle,\n 'pagename': pagename,\n 'page_source': wikitext,\n 'comment': comment,\n 'readonly': readonly,\n 'edit_rows': edit_rows,\n 'scroll_bar_pos': req.args.get('scroll_bar_pos', '')\n }\n req.hdf['blog'] = info\n req.hdf['title'] = 'New Blog Entry'\n tlist = req.args.getlist('tag')\n if not tlist:\n tlist = [self.env.config.get('blog', 'default_tag', 'blog')]\n req.hdf['tags'] = ', '.join(tlist)\n pass",
"def show_post_details(post_id):\n post = Post.query.get(post_id)\n nice_date = post.format_date\n tags = post.tags\n\n return render_template('/post_detail.html', post=post, post_date=nice_date, tags=tags)",
"def render(self, request):\n IP = request.getClientIP()\n html = \"\"\n html += \"<html>Hello, world!</html><br><br>\"\n html += \"Keys are...<br>\"\n for key in request.args.keys():\n html += \"%s \" % key\n html += \"<br>uri = %s<br>\" % request.uri\n html += \"<br>method = %s<br>\" % request.method\n html += \"<br>path = %s<br>\" % request.path\n \n field_value = request.args.get('Field', '')\n html += \"<br>Field = %s<br>\" % field_value\n html += \"<br>ClientIP = %s<br>\" % IP\n button_val = request.args.get('name_submit','') \n html += \"<br>button_val = %s<br>\" % button_val\n form = \"\"\"\n <FORM ACTION=\".\" METHOD=\"POST\" ENCTYPE=\"application/x-www-form-urlencoded\">\n<P>Test input: <INPUT TYPE=\"TEXT\" NAME=\"Field\" SIZE=\"25\"><BR>\n<INPUT TYPE=\"SUBMIT\" NAME=\"name_submit\" VALUE=\"Submit\">\n</FORM>\n \"\"\"\n return html + form"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subtract trajectory from current instance.
|
def __sub__(self, other_traj):
return Trajectory(self.modes - other_traj.modes)
|
[
"def __sub__(self, delta):\n copy = self.__copy__(self)\n copy -= delta\n return copy",
"def __sub__(self, other):\n return Wrench(\n self.force - other.force,\n self.torque - other.torque)",
"def __sub__(self, other: Vector) -> Vector:\n return Vector(self.x - other.x, self.y - other.y, self.z - other.z)",
"def get_trajectory(self):\n return self.trajectory;",
"def __sub__(self, other):\r\n if isinstance(other, tuple) and len(other) == 2:\r\n other = XY.from_tuple(other)\r\n if not isinstance(other, XY):\r\n raise NotImplementedError(\r\n \"Only XY() or (x, y) addition implemented.\")\r\n x = self.x - other.x\r\n y = self.y - other.y\r\n return XY(x, y)",
"def __sub__(self, other: 'Wavefunction') -> 'Wavefunction':\n out = copy.deepcopy(self)\n out.ax_plus_y(-1.0, other)\n return out",
"def __sub__(self, other):\n if isinstance(other, Rotator):\n return Rotator(self.x - other.x, self.y - other.y, self.z - other.z, radians=True)\n elif isinstance(other, numbers.Real):\n return Rotator(self.x - other, self.y - other, self.z - other, radians=True)\n else:\n return NotImplemented",
"def __sub__(self, other):\n objects = []\n for i in range(len(self.objects)):\n new_object = self.objects[list(self.objects.keys())[i]] - other\n objects.append(new_object)\n \n return AutoDiffVector(objects)",
"def _undo_trajectory(self):\n for t in self._traj:\n self._mask.__setitem__(t, 0)",
"def __sub__(self, other):\n twins = []\n OK = self.good\n if isinstance(other, CCD):\n OK = OK and other.good\n for win,owin in zip(self._data,other._data):\n twins.append(win - owin)\n else:\n for win in self._data:\n twins.append(win - other)\n return CCD(twins, self.time, self.nxmax, self.nymax, OK, self.head)",
"def __sub__(self, pPos2):\n return _almathswig.Pose2D___sub__(self, pPos2)",
"def adjoint(self):\n with qml.tape.stop_recording():\n new_tape = self.copy(copy_operations=True)\n new_tape.inv()\n\n # the current implementation of the adjoint\n # transform requires that the returned inverted object\n # is automatically queued.\n with QuantumTape._lock:\n QueuingContext.append(new_tape)\n\n return new_tape",
"def __sub__(self, other):\n obj = self._to_complex(other)\n return self.__add__(-obj)",
"def __sub__(self, offset):\n return self + -offset",
"def _undo_trajectory(self):\n for t in self._traj:\n self._mask[t] = 0",
"def __sub__(self, other):\n if not all(np.equal(self.x, other.x)):\n raise ValueError(\"X axis values are not compatible!\")\n return self.__class__(self.x, self.y - other.y, *self._args,\n **self._kwargs)",
"def __sub__(self, other):\n\n if isinstance(other, Quaternion):\n\n real = self._real - other._real\n imag_i = self._i - other._i\n imag_j = self._j - other._j\n imag_k = self._k - other._k\n\n return Quaternion(real, imag_i, imag_j, imag_k)\n\n else:\n return self - other",
"def __sub__(self, other):\n try:\n return Temperature(self.celsius - other.celsius)\n except AttributeError:\n return Temperature(self.celsius - other)",
"def remove_tiptilt(self):\n plane = fit_plane(self.x, self.y, self.phase)\n self.phase -= plane\n return self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Evaluate (approximate) equality of trajectory and current instance.
|
def __eq__(self, other_traj, rtol = 1e-5, atol = 1e-8):
return np.allclose(self.modes, other_traj.modes, rtol = rtol, atol = atol)
|
[
"def is_approx_equal(self, other):\n raise NotImplementedError",
"def __eq__(self, other):\n if isinstance(other, Solution):\n equalities = np.isclose(self.x, other.x, rtol=Solution.eps, atol=Solution.eps)\n return np.all(equalities)\n else:\n raise InvalidComparison('Attempted to compare instance with nonSolution instance.')",
"def __eq__(self, pT2):\n return _almathswig.Transform___eq__(self, pT2)",
"def __eq__(self, other):\n return (isinstance(other, self.__class__)\\\n and (self._lhs == other._lhs) \\\n and (self._rhs == other._rhs) \\\n and (self._phi_c == other._phi_c) )",
"def _ve_eq_ ( self , other ) :\n if isinstance ( other , VE ) :\n v1 = self .value()\n v2 = other.value()\n return _is_equal_ ( v1 , v2 ) and _is_equal_ ( self.cov2() , other.cov2() )\n elif _is_zero_ ( self.cov2() ) :\n return _is_equal_ ( float ( self ) , float ( other ) ) \n else :\n raise NotImplementedError ( ' Equality for %s and %s is not implemented' % ( self , other ) )",
"def __eq__(self, a):\n if ~isinstance(a, tm):\n return False\n if np.all(self.TAA == a.TAA):\n return True\n return False",
"def __eq__(self, *args):\n return _snap.TRnd___eq__(self, *args)",
"def approx_equals(a, b):\n return (a - b) < 1.5e-16",
"def exact_compare(m1: Minutia, m2: Minutia):\n if m1.x == m2.x and m1.y == m2.y and m1.theta == m2.theta:\n return True\n else:\n return False",
"def __eq__(self, other):\n\n if isinstance(other, Quaternion):\n\n if self._real == other._real and self._i == other._i and self._j == other._j and self._k == other._k:\n equals = True\n else:\n equals = False\n\n return equals\n\n else:\n raise TypeError(f\"Cannot compare '{type(self)}' with '{type(other)}' \")",
"def __eq__(self, rhs):\n return self.getTime() == rhs.getTime()",
"def equality_add(equality_func, obj):\r\n result = self.equality(self, obj)\r\n\r\n # collation point condition\r\n for i in range(self.number_of_section):\r\n D = self.D\r\n derivative = np.zeros(0)\r\n for j in range(self.number_of_states[i]):\r\n state_temp = self.states(j, i) / self.unit_states[i][j]\r\n derivative = np.hstack((derivative, D[i].dot(state_temp)))\r\n tix = self.time_start(i) / self.unit_time\r\n tfx = self.time_final(i) / self.unit_time\r\n dx = self.dynamics[i](self, obj, i)\r\n result = np.hstack((result, derivative - (tfx - tix) / 2.0 * dx))\r\n \r\n # knotting condition\r\n for knot in range(self.number_of_section - 1):\r\n if (self.number_of_states[knot] != self.number_of_states[knot + 1]):\r\n continue # if states are not continuous on knot, knotting condition skip\r\n for state in range(self.number_of_states[knot]):\r\n param_prev = self.states(state, knot) / self.unit_states[knot][state]\r\n param_post = self.states(state, knot + 1) / self.unit_states[knot][state]\r\n if (self.knot_states_smooth[knot]):\r\n result = np.hstack((result, param_prev[-1] - param_post[0]))\r\n\r\n return result",
"def test_equality(self):\n other = deepcopy(self.atom)\n self.assertEqual(self.atom, self.atom)\n self.assertEqual(self.atom, other)\n\n other.coords_fractional = self.atom.coords_fractional + 1\n self.assertNotEqual(self.atom, other)",
"def __eq__(self, *args):\n return _snap.TVoid___eq__(self, *args)",
"def __eq__(self, other: Transform) -> bool:\n if len(self.transforms) != len(other.transforms):\n return False\n else:\n return all([a == b for a, b in zip(self.transforms, other.transforms)])",
"def __eq__(self, other: \"Airfoil\") -> bool:\n if other is self: # If they're the same object in memory, they're equal\n return True\n\n if not type(self) == type(other): # If the types are different, they're not equal\n return False\n\n # At this point, we know that the types are the same, so we can compare the attributes\n return all([ # If all of these are true, they're equal\n self.name == other.name,\n np.allclose(self.coordinates, other.coordinates),\n ])",
"def equal_position_and_time_step(self, other):\n assert isinstance(other, MStarState)\n for i, single_state in enumerate(self._single_agents_states):\n if not single_state.equal(other.get_single_agent_states()[i]):\n return False\n return True",
"def __eq__(self, other: Any) -> bool:\n\n # Checks if self and other are the same type, then compares their\n # point values if they are the same type\n return (type(self) == type(other) and self.x == other.x and\n self.y == other.y)",
"def __eq__(self, other):\n return (other.atom_id_no_altloc() == self.atom_id_no_altloc())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the element(s) of the modes indexed by the given key.
|
def __getitem__(self, key):
return self.modes[key]
|
[
"def find(self, key):\n return list(self.iterate(key))",
"def elements_in_set(self, key) -> List:\n root = self.find(key)\n return [r for r in self.data if self.find(r) == root]",
"def get_list(self, key):\n return self.__settings[key]",
"def search(self, key):\n\n symbols=[]\n with self._lock:\n for symbol in self.all():\n try:\n try:\n if key.match(symbol.tag):\n symbols.append(symbol)\n except:\n if key in symbol.tag:\n symbols.append(symbol)\n except:\n pass\n return symbols",
"def lookup(self, key):\n return self.filter(lambda x: x[0] == key).values().collect()",
"def modes(self):\n return self.get_attr_set('modes')",
"def _mode_subset(signal, freq, rate, main_freq, samples, modes=[1], width=0.2):\r\n # Compute the FFT.\r\n amp = fft_amp(signal, samples=samples)\r\n\r\n # Calculate resolution in frequency domain.\r\n res = (freq[1] - freq[0])\r\n\r\n for mode in modes:\r\n m_name = f'm{mode}'\r\n\r\n # Search limits indexes\r\n lower = int(round(main_freq * (mode - width) / res))\r\n upper = int(round(main_freq * (mode + width) / res))\r\n\r\n yield m_name, freq[lower: upper], amp[lower: upper]",
"def keys(self):\n return [k for k in self.nml if k == self.key]",
"def iterateKey(self, key):\n if key and key in self._keyed:\n for v in self._keyed[key]: yield (key, v)\n for v in self._wild: yield (None, v)\n return",
"def mode_pattern(mode_number: int) -> List[int]:\n return MODE_PATTERNS[mode_number]",
"def modes_list(modes):\n a = modes % 10\n b = (modes % 100 - a) // 10\n c = (modes % 1000 - b - a) // 100\n return [a, b, c]",
"def preset_modes(self) -> list:\n try:\n return list(self._ctrl_params['mode'].keys())\n except KeyError:\n return []",
"def mode(self) -> List[DataValue]:\n return mode(self.iterable)",
"def get_indices(self, modes: str, *indices: int) -> Union[int, List[int]]:\n logger = logging.getLogger(__name__)\n output = []\n for mode, index in zip(reversed(modes), indices):\n\n logger.warning(\"Getting value %r: %d\", mode, index)\n if mode == \"0\":\n index = self[index]\n logger.warning(\" from position: %d\", index)\n elif mode == \"1\":\n pass\n elif mode == \"2\":\n index = self[index]+self.offset\n logger.warning(\" using relative base %d\", self.offset)\n logger.warning(\" from position: %d\", index)\n\n output.append(index)\n logger.warning(\" referencing value: %d\", self[index])\n\n if len(output) == 1:\n output = output[0]\n return output",
"def _key_set(self):\n return set(GetKey(t) for t in self._m)",
"def select(self, table, cols, mode='list', key_filter=True):\n if cols is None:\n cols = [c.name for c in self.relations[table]]\n rows = self.read_table(table, key_filter=key_filter)\n for row in select_rows(cols, rows, mode=mode):\n yield row",
"def get_adj_neighbor(self, key):\n return set(self.graph[key]) # directional adj",
"def get_bool_array(self, key):\n if isinstance(key, KeyFlag):\n return self[key]\n raise TypeError(\"key={} not an lsst.afw.table.KeyFlag\".format(key))",
"def get_array(self,key,*args):\n if not args:\n return numpy.array([getattr(p,key) for p in self.panels])\n else:\n return [self.get_array(k) for k in (key,)+args]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the value(s) of the modes indexed by the given key.
|
def __setitem__(self, key, value):
self.modes[key] = value
|
[
"def csi_set_modes(self, modes, qmark, reset=False):\n flag = not reset\n\n for mode in modes:\n self.set_mode(mode, flag, qmark, reset)",
"def __getitem__(self, key):\r\n return self.modes[key]",
"def ctx_set(flags: Flags, fdict: FlagDict, key: str, value: Any):\n key = ctx_fix_key(flags, fdict, key) or key\n fdict[key] = value",
"def __setitem__(self, key, value):\n self.fcmdict[key] = value",
"def direct_set(self, key: str, value):\n set_store_value(self.store, key, value)",
"def setitem(self, key, value):",
"def set_option(self, key, value):\n self.options.set(key, value)",
"def SetKeyword(key, value):",
"def set_flags(self, key, mask):\n if mask == 0:\n if key in self.flags:\n del self.flags[key]\n return\n self.flags[key] = mask",
"def set_val(self, key, val, extra_data):\n raise NotImplementedError",
"def set(self, key, value, *namespaces):\n nskey = pack_ns(key, *namespaces)\n self[nskey] = value",
"def setvalue(\n self,\n flows: typing.Sequence[mitmproxy.flow.Flow],\n key: str, value: str\n ) -> None:\n updated = []\n for f in flows:\n self.settings[f][key] = value\n updated.append(f)\n ctx.master.addons.trigger(\"update\", updated)",
"def set_generic(self, _key: str, _type, _value):\n set_func = {\n \"bool\" : self.set_bool,\n \"float\" : self.set_float,\n \"int\" : self.set_int,\n \"point\" : self.set_point,\n \"points\": self.set_points,\n \"str\" : self.set_str\n }\n\n # noinspection PyArgumentList\n set_func.get(_type)(_key, _value)",
"def setModeFromMODCOD(self, index):\n self.setMode(self.fromMODCOD(index))",
"def setOption(self, key, value):\n if self.readyMoves:\n log.warning(\n \"Options set after 'readyok' are not sent to the engine\",\n extra={\"task\": self.defname},\n )\n if key == \"cores\":\n self.optionQueue.append(\"cores %s\" % value)\n elif key == \"memory\":\n self.optionQueue.append(\"memory %s\" % value)\n elif key.lower() == \"ponder\":\n self.__setPonder(value == 1)\n else:\n self.optionQueue.append(\"option %s=%s\" % (key, value))",
"def __setitem__(self, key, value):\n\n self._v_file._checkWritable()\n\n if is_idx(key):\n # If key is not a sequence, convert to it\n coords = [key]\n value = [value]\n elif isinstance(key, slice):\n (start, stop, step) = self._processRange(\n key.start, key.stop, key.step )\n coords = range(start, stop, step)\n # Try with a boolean or point selection\n elif type(key) in (list, tuple) or isinstance(key, numpy.ndarray):\n coords = self._pointSelection(key)\n else:\n raise IndexError(\"Invalid index or slice: %r\" % (key,))\n\n # Do the assignment row by row\n self._assign_values(coords, value)",
"def set_value(self, key, value):\n self.obj[key] = value",
"def set(self, key, value):\n logger.debug(\"setting '%s' = '%s' on network\", key, value)\n dkey = digest(key)\n\n def store(nodes):\n logger.debug(\"setting '%s' on %s\", key, nodes)\n ds = [self.protocol.call_store(node, dkey, value)\n for node in nodes]\n return future_list(ds, self._any_respond_success)\n\n node = DHTNode(dkey)\n nearest = self.protocol.router.find_neighbors(node)\n if len(nearest) == 0:\n logger.warning(\"There are no known neighbors to set key %s\", key)\n future = asyncio.Future()\n future.set_result(False)\n return future\n spider = NodeSpiderCrawl(self.protocol, node, nearest,\n self.ksize, self.alpha)\n nodes = spider.find()\n while type(nodes) != list:\n nodes = yield from nodes\n\n return store(nodes)",
"def _set_mode(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'IPV6': {}, 'IPV4': {}, 'MIXED': {}},), is_leaf=True, yang_name=\"mode\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"mode must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-routing-policy:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'IPV6': {}, 'IPV4': {}, 'MIXED': {}},), is_leaf=True, yang_name=\"mode\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__mode = t\n if hasattr(self, '_set'):\n self._set()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a new trajectory with rounded modes.
|
def __round__(self, decimals = 6):
return Trajectory(np.around(self.modes, decimals = decimals))
|
[
"def get_trajectory(self):\n start_point = [0.3, 0, 0.295]\n end_point = [0.5, -0.2, 0.295] \n\n milestones = np.array([start_point, end_point])\n self.num_waypoints = np.size(milestones, 0)\n\n return trajectory.Trajectory(milestones=milestones)",
"def __round__(self, ndigits=None):\n return Quaternion(\n round(self.real, ndigits), round(self.i, ndigits),\n round(self.j, ndigits), round(self.k, ndigits))",
"def trajectoryCreation(self):\n stepX = -(self.x - self.aimPosX)\n stepY = -(self.y - self.aimPosY)\n\n for i in range(0, round(10 / self.speed)):\n self.path.append((stepX * (i / round(10/self.speed)), stepY * (i / round(10/self.speed))))",
"def makeCurve(self):\n pos = list()\n # Need to change to take first two coordinates of track, and extend this vector out in the opposite direction.\n pos.append((cmds.getAttr(self._trackway[0]+\".translateX\")-100, 100, cmds.getAttr(self._trackway[0]+\".translateZ\")-100))\n\n for track in self._trackway:\n pos.append(((cmds.getAttr(track+\".translateX\")), 0, cmds.getAttr(track+\".translateZ\")))\n\n # Get reverse iterator for current list of tracks\n it = reversed(pos)\n\n # Get list of last two points in pos list.\n last_vec = [pos[-2], pos[-1]]\n\n dir_vec = tuple([(last_vec[1][i]-last_vec[0][i])/2 for i in range(3)])\n norm_vec = tuple([-1.5*dir_vec[2], 0, 1.5*dir_vec[0]])\n center_pt = (cmds.getAttr(self._trackway[-1]+\".translateX\")+dir_vec[0],0, cmds.getAttr(self._trackway[-1]+\".translateZ\")+dir_vec[2])\n\n # Put three corners of turnaround into pos list.\n pos.append(tuple([center_pt[i]+norm_vec[i] for i in range(3)]))\n pos.append(tuple([center_pt[i]+dir_vec[i] for i in range(3)]))\n pos.append(tuple([center_pt[i] - norm_vec[i] for i in range(3)]))\n\n # Put reversed initial pos list into pos.\n rev = list()\n try:\n while True:\n rev.append(it.next())\n except StopIteration:\n pass\n pos += rev\n\n # Set _trackWayCurve to created curve based on the pos list, and move the track to _camElevation.\n self._trackWayCurve = cmds.curve(name='camCurve',p=pos)\n self._baseTrackwayCurve = cmds.duplicate(self._trackWayCurve, name='baseCurve')\n cmds.setAttr(self._trackWayCurve+\".translateY\", self._camElevation)",
"def getSmoothTrapezoid(self, *args):\n return _almathinternal.InterpolationTrapezoid_getSmoothTrapezoid(self, *args)",
"def round(self):\n for point in self.points:\n point.round()",
"def round_state(self, state):\n\n return np.around(state, 3)",
"def pre_radius(self):\n\n forcing_radius_start = None if self.force_radius_start.m == 8392 else (\n self.force_radius_start.m)\n forcing_radius_end = None if self.force_radius_end.m == 8392 else (\n self.force_radius_end.m)\n\n (rad_start, rad_end) = cut_rad(\n cutoff=self.cutoff,\n gsigma=self.gsigma,\n mode=self.mode,\n force_radius_start=forcing_radius_start,\n force_radius_end=forcing_radius_end,\n )\n if self.force_radius_step.m != 8392:\n nbins = int((rad_end-rad_start)/self.force_radius_step.m) + 2\n rad_end = rad_start + (nbins-1)*self.force_radius_step.m\n else:\n nbins = self.nbins\n\n if self.spacing == \"logspace\":\n radius = np.logspace(\n np.log10(rad_start),\n np.log10(rad_end),\n np.array(nbins).sum()\n )\n elif self.spacing == \"linspace\":\n radius = np.linspace(\n rad_start,\n rad_end,\n np.array(nbins).sum()\n )\n else:\n raise ValueError(\"Spacing must be 'logspace' or 'linspace'!\")\n\n return radius*u.m",
"def circularTiling(pts, mode=UNDERFILL, serpentine=False, fov = 0.5):\n\tres = semstg.CircularTiling(pts, mode, serpentine)\n\tres.setTileDimension([fov,fov])\n\treturn res",
"def get_trajectory(self) -> Tuple[int, int, int]:\n base_trajectory: Tuple[int, int, int]\n if type(self.action) is MoveShip:\n raise ValueError(\"Not valid for MoveShip action\")\n\n self.action = cast(MoveWaypoint, self.action)\n base_trajectory = self.action.get_trajectory_base()\n\n r, c, a = base_trajectory # row, col, angle\n # For a(ngle), the value is transformed to represent quarter turns with 1\n return (r * self.value, c * self.value, a * (self.value // 90))",
"def test_compass_rounded():\n expected = {\n 10: 0,\n 40: 0,\n 44: 0,\n 45: 90,\n 46: 90,\n 89: 90,\n 90: 90,\n 91: 90,\n 134: 90,\n 135: 180,\n 136: 180,\n 179: 180,\n 180: 180,\n 181: 180,\n 224: 180,\n 225: 270,\n 226: 270,\n 269: 270,\n 270: 270,\n 271: 270,\n 314: 270,\n 315: 0, # all angles are modulo 360\n 316: 0,\n 359: 0,\n 360: 0,\n 361: 0,\n 0: 0,\n -1: 0,\n -44: 0,\n -45: 270,\n -46: 270,\n -89: 270,\n -90: 270,\n -91: 270,\n -134: 270,\n -135: 180,\n -136: 180\n }\n\n for angle, rounded in expected.items():\n compass = orientation.Compass()\n compass.set_angle(angle)\n assert compass.get_rotation_simple() == rounded",
"def border_radius(self, value: BorderRadius) -> 'Tailwind':\n self.element.classes('rounded-' + value)\n return self",
"def extrapolate_circular(src, dst):\n\n return extrapolate(src, dst, BorderType.Circular)",
"def from_trajectory(cls, trajectory, nb_set_and_cutoff, max_search_depth = 32 , delta_Step = 1, first_frame = 0, parallel = False):\n ring_class = cls(max_search_depth = max_search_depth) # initialize class\n nb_set_and_cutoff_list = [nb_set_and_cutoff for i in range(len(trajectory))]\n step = amof.trajectory.construct_step(delta_Step=delta_Step, first_frame = first_frame, number_of_frames = len(trajectory))\n ring_class.compute_ring(trajectory, nb_set_and_cutoff_list, step, parallel)\n return ring_class # return class as it is a constructor",
"def rounded_box_path(context, radii):\r\n x, y, w, h, tl, tr, br, bl = radii\r\n\r\n if 0 in tl:\r\n tl = (0, 0)\r\n if 0 in tr:\r\n tr = (0, 0)\r\n if 0 in br:\r\n br = (0, 0)\r\n if 0 in bl:\r\n bl = (0, 0)\r\n\r\n if (tl, tr, br, bl) == 4 * ((0, 0),):\r\n # No radius, draw a rectangle\r\n context.rectangle(x, y, w, h)\r\n return\r\n\r\n context.move_to(x, y)\r\n for i, (w, h, (rx, ry)) in enumerate((\r\n (0, 0, tl), (w, 0, tr), (w, h, br), (0, h, bl))):\r\n context.save()\r\n context.translate(x + w, y + h)\r\n radius = max(rx, ry)\r\n if radius:\r\n context.scale(min(rx / ry, 1), min(ry / rx, 1))\r\n context.arc(\r\n (-1 if w else 1) * radius, (-1 if h else 1) * radius, radius,\r\n (2 + i) * math.pi / 2, (3 + i) * math.pi / 2)\r\n context.restore()",
"def get_integrated_trajectory(method):\n\n # create trajectory\n trajectory = SpringTrajectoryGenerator()\n # get motion timestamps\n times = trajectory.times\n start_position = trajectory.start_position\n start_velocity = trajectory.get_start_velocity()\n # get analytical accelerations\n accelerations = trajectory.get_analytical_accelerations()\n # numerical integrate acceleration to get velocities\n velocities = method(times, accelerations, start_velocity)\n # numerical integrate velocities to get trajectory\n integrated_trajectory = method(times, velocities, start_position)\n return integrated_trajectory",
"def _rad2gon(value):\n return value / math.pi * 200.0",
"def get_pose(self, t, method='cubic', bspline_smoothness=10):\n if method == 'kmf' or method == 'kms':\n if method == 'kmf' and self.kf_filtered_means is None:\n self.__generate_kf_filter_estimate()\n elif method == 'kms' and self.kf_smoothed_means is None:\n self.__generate_kf_smoother_estimate()\n\n timestamps_known = self.kf_timestamps\n else:\n timestamps_known = self.gps_t\n\n # TODO Instead of not accepting these values extend the interpolation before and after the known time range\n # The pose for the first timestamp or preceding timestamps cannot be estimated\n assert(timestamps_known[0] <= np.min(t))\n # The pose for the last timestamp or succeeding timestamps cannot be estimated\n assert(np.max(t) <= timestamps_known[-1])\n\n epsilon_t = np.min(np.diff(timestamps_known)) / 100\n t_next = t + epsilon_t;\n pos = self.get_position(t, method=method, bspline_smoothness=bspline_smoothness)\n pos_next = self.get_position(t_next, method=method, bspline_smoothness=bspline_smoothness)\n\n pos_diff = pos_next - pos\n\n # TODO Currently the orientation is calculated by disregarding the z-axis (so there is really just a yaw angle calculated)\n base_x = np.array([1.0, 0.0, 0.0])\n\n # Discard z coordinate\n v1 = base_x[0:2]\n v2 = pos_diff[0:2]\n\n # Calculate angle between v1 and v2\n yaw = np.math.atan2(np.linalg.det([v1,v2]), np.dot(v1,v2))\n\n orientation_offset = tf.euler.euler2quat(0, np.deg2rad(90), np.deg2rad(-90), 'rxyz')\n orientation = tf.quaternions.qmult(tf.euler.euler2quat(yaw, 0, 0, 'rzyx'), orientation_offset)\n\n return pos.T, orientation",
"def correct_trajectory(trajectory, error):\n rn, _, rp = earth.principal_radii(trajectory.lat, trajectory.alt)\n\n result = trajectory.copy()\n result['lat'] -= np.rad2deg(error.north / rn)\n result['lon'] -= np.rad2deg(error.east / rp)\n result['alt'] += error.down\n result[['VN', 'VE', 'VD']] -= error[['VN', 'VE', 'VD']]\n result[['roll', 'pitch', 'heading']] -= error[['roll', 'pitch', 'heading']]\n return result.dropna()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Question function: after user input, this function calls the response function and prints the returned result, then calls the replay function
|
def question():
input('Ask your question and press the [Enter] button.')
answer = response()
print('\nAsking the spirits...')
for thought in range(3):
print('.', end='')
time.sleep(1)
print("\n{}\n".format(answer))
replay()
|
[
"def question(self, question):\n if self.use_STT:\n self.say(question)\n response = STT.wait_for_voice()\n else:\n naoqiutils.speak(question)\n response = raw_input(question + \"\\n> \")\n return response",
"def ask_question(question) :\n logger.info('Asking wolframalpha.')\n try :\n cprint('Hmm..Thinking....','yellow')\n api_key = 'GLHKQ7-R5V9E6GU3Y'\n client = wolframalpha.Client(api_key)\n res = client.query(question)\n answer = next(res.results).text\n if 'Wolfram|Alpha' in answer:\n answer = answer.replace('Wolfram|Alpha',bot['name'])\n if 'no data available' in answer:\n answer = wiki_search(question,1) \n # search_google(question)\n return answer\n except :\n logger.info('Wolframalpha do not know the answer.')\n answer = wiki_search(question,1)\n logger.info(answer)\n # search_google(question)\n # answer = 'check browser.'\n return answer",
"def question_2():\n \n print('\"Question: This leg nerve is the longest in the body:\"')\n time.sleep(2)\n print(\"\"\" A. Saphenous\n B. Deep peroneal\n C. Sciatic\"\"\")\n \n response = input('>>> ')\n \n while response in answer_choices:\n if response in A_answers or B_answers:\n\n print('You tap and button and immediately afterwards, the screen changes:')\n time.sleep(2)\n\n print('WRONG')\n time.sleep(1)\n\n print('At the same time, an sharp electric current runs through you!')\n time.sleep(1.5)\n\n print('Another question comes up.'\n \"\\nIt's a physics question...\")\n time.sleep(1.5)\n\n print('You decide it is too risky, since the zap is loud and can draw attention.')\n break\n\n elif response in C_answers:\n print('You tap and button and immediately afterwards, the screen changes:')\n time.sleep(2)\n\n print('CORRECT')\n time.sleep(1)\n\n print()\n print('Decoded Message: \\t', 'already')\n print()\n\n print('With that puzzle piece, you look to your device.')\n time.sleep(1.5)\n\n print('It seems that the nurse was successful as well.')\n time.sleep(1.5)\n\n print(\"So far, the password reads 'hello', 'already','are'.\")\n break\n \n while response not in answer_choices:\n print(\"Huh? How did you choose a different answer if there's only 3 buttons?\")\n time.sleep(1)\n \n question_2()\n break",
"def proquest(askquestions):\n\tif askquestions:\n\t\tanswer = raw_input(\"Tell me, do you want to go on ? (yes/no) \")\n\t\tif answer[:3] != \"yes\":\n\t\t\tsys.exit(\"Ok, bye.\")\n\t\tprint \"\"\t# to skip one line after the question.",
"def vqa_prompt(self, question, answer=None) -> str:",
"def question_and_answer(quest):\n\n ans = ''\n ans = str(raw_input(quest))\n while(1):\n try:\n if (validate_answer(ans) == 'y'):\n ret = True\n else:\n ret = False\n except Exception as e:\n continue\n break\n return ret",
"def test_print_question(self, capsys):\n app_functions.print_question(4, 3, \"sum\")\n captured = capsys.readouterr() # capture print output\n assert captured.out.strip() == \"You rolled a 4 and a 3... What is the sum of 4 and 3?\"\n\n app_functions.print_question(1, 6, \"sum\")\n captured = capsys.readouterr() # capture print output\n assert captured.out.strip() == \"You rolled a 1 and a 6... What is the sum of 1 and 6?\"\n\n app_functions.print_question(4, 3, \"difference\")\n captured = capsys.readouterr() # capture print output\n assert captured.out.strip() == \"You rolled a 4 and a 3... What is the difference between 4 and 3?\"\n\n app_functions.print_question(1, 6, \"difference\")\n captured = capsys.readouterr() # capture print output\n assert captured.out.strip() == \"You rolled a 1 and a 6... What is the difference between 1 and 6?\"",
"def generator():\n predictor = random.choice(predictions)\n print(predictor)\n\n print('✰✰✰✰✰✰✰✰✰✰✰✰✰✰' * 7)\n play_again = input(\"Do you want to know more about your future? Y/yes or N/no \").lower()\n if play_again == 'y':\n input(\"Type in your question?\")\n generator()\n else:\n print('✰✰✰✰✰✰✰✰✰✰✰✰✰✰' * 7)\n print(\"Your future looks marvelous, keep smiling!\")\n quit()",
"def user_prompt():\r\n valid_response = False\r\n while not valid_response:\r\n choice = input(\"Hello! What would you like today? (espresso/latte/cappuccino): \").lower()\r\n if choice == \"espresso\" or choice == \"latte\" or choice == \"cappuccino\" or choice == \"off\":\r\n return choice\r\n elif choice == \"report\":\r\n # TODO: 3. Print report of all coffee machine resources if \"report\" is entered\r\n print_resources()\r\n else:\r\n print(\"Invalid response.\")",
"def ask(self, question):\n\n # Set the current question\n self.data['stimulus'] = question\n\n # Connect to Cleverbot and remember the response\n resp = self._send()\n\n # Add the current question to the conversation log\n self.conversation.append(question)\n\n parsed = self._parse(resp.text)\n\n # Set data as appropriate\n if self.data['sessionid'] != '':\n self.data['sessionid'] = parsed['conversation_id']\n\n # Add Cleverbot's reply to the conversation log\n self.conversation.append(parsed['answer'])\n\n return parsed['answer'].encode('latin-1').decode('utf-8')",
"def YNQ(question, default=\"retry\"):\n valid = {\n 'retry': 2, 'retr': 2, 'ret': 2, 're':2, 'r':2,\n 'yes' : 1, 'ye' : 1, 'y' : 1,\n 'no' : 0, 'n' : 0,\n 'quit' :-1, 'qui' :-1, 'qu' :-1, 'q':-1\n }\n\n if default == None : prompt = ' [y/n/r/q]: '\n elif default == \"yes\" : prompt = ' [Y/n/r/q]: '\n elif default == \"no\" : prompt = ' [y/N/r/q]: '\n elif default == \"retry\" : prompt = ' [y/n/R/q]: '\n elif default == \"quit\" : prompt = ' [y/n/r/Q]: '\n else: raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '': return default\n elif choice in valid.keys(): return valid[choice]\n else: sys.stdout.write(\"Invalid Response. Please use 'yes', 'no', 'retry' or 'quit'.\\n\")",
"def fetch_reply(query, session_id):\n\tresponse = apiai_response(query, session_id)\n\t#print(response)\n\tintent, params = parse_response(response)\n\treply = {}\n\tif response['result']['action'].startswith('smalltalk'):\n\t\treply['type'] = 'smalltalk'\n\t\treply['data'] = response['result']['fulfillment']['speech']\n\t\n\n\telif intent == \"points_table\":\n\t\treply['type']=\"points_table\"\n\t\treply['data']=func(params)\n\t\tif reply['data']==\"error\":\n\t\t\treply['type']='none'\n\t\n\n\telif intent==\"live_score\":\n\t\treply['type']='live_score'\n\t\treply['data']=livescore(params)\n\t\tif reply['data']==\"error\":\n\t\t\treply['type']='none'\n\t\n\n\telif intent=='fixture':\n\t\treply['type']='fixture'\n\t\treply['data']=fixture(params)\n\t\tprint(reply['data'])\n\t\tif reply['data']==\"error\":\n\t\t\treply['type']='none'\n\t\n\n\telif intent=='result_today':\n\t\treply['type']='resulttoday'\n\t\treply['data']=resultstoday(params)\n\t\tif reply['data']==\"error\":\n\t\t\treply['type']='none'\n\t\n\n\telif intent=='world_ranking':\n\t\treply['type']='worldranking'\n\t\treply['data']=worldranking(params)\n\t\tif reply['data']==\"error\":\n\t\t\treply['type']='none'\n\n\t\n\telif intent=='worldranking_team':\n\t\treply['type']='worldrankingforteam'\n\t\treply['data']=worldrankingforteam(params)\n\t\tif reply['data']==\"error\":\n\t\t\treply['type']='none'\n\t\n\n\telif intent=='live_score':\n\t\treply['type']='livescore'\n\t\treply['data']=livescore(params)\n\t\tif reply['data']==\"error\":\n\t\t\treply['type']='none'\n\t\n\t\n\telif intent=='playerranking':\n\t\treply['type']='playerranking'\n\t\treply['data']=playerranking(params)\n\t\tif reply['data']==\"error\":\n\t\t\treply['type']='none'\n\t\n\t\n\telif intent=='scorer':\n\t\treply['type']='scorer'\n\t\treply['data']=scorer(params)\n\t\tif reply['data']==\"error\":\n\t\t\treply['type']='none'\n\t\n\telse:\n\t\treply['type'] = 'none'\n\t\treply['data'] = 'Sorry'\n\t\t\n\treturn reply",
"def give_an_example():\n # Choose randomly from the list of possible types\n prompt, answer = possible_types_with_functions[randrange(len(possible_types_with_functions))]\n # Ask for the response from the user\n response = input(\"Please give me an example of a %s: \" % prompt)\n try:\n # Try to do the conversion with the function\n # This will raise a ValueError if the conversion cannot be done\n answer(response)\n # Float is an exceptional case because numbers without decimals can be converted to floats\n # So need to make sure there is a decimal point in the response\n if answer == float:\n if \".\" not in response:\n print(\"Incorrect! Floats must include a decimal point.\")\n return 0\n # Make sure strings are wrapped in single or double quotes\n elif answer == str:\n string_incorrect_message = \"Incorrect! Please wrap strings in single or double quotes.\"\n if response[0] == \"'\":\n if response[-1] != \"'\":\n print(string_incorrect_message)\n return 0\n elif response[0] == \"\\\"\":\n if response[-1] != \"\\\"\":\n print(string_incorrect_message)\n return 0\n else:\n print(string_incorrect_message)\n return 0\n # Boolean is another exceptional case because many other types can be converted to booleans\n elif answer == bool:\n if response != \"True\" and response != \"False\":\n if response == \"true\" or response == \"false\":\n print(\"Incorrect! Booleans in Python must be capitalized.\")\n else:\n print(\"Incorrect!\")\n return 0\n # If no exception was raised, the response was correct\n print(\"Correct! Great work!\")\n return 1\n except ValueError:\n # If an exception was raised, the response was incorrect\n print(\"Incorrect!\")\n return 0",
"def ask(*args):\n #specify q_num as a global variable so it can be edited\n global q_num\n\n if (len(q_input.get())) != 0:\n answers.append(q_input.get())\n #empty text box\n q_input.delete(0, END)\n\n #if the current question number is in breaks, dont run continue story, go to print story: do_story()\n if q_num in breaks:\n do_story()\n if q_num != 8:\n return \n \n #hide story and continue button\n story_lbl.pack_forget()\n continue_button.pack_forget()\n\n #add one to the question number\n #append answer from textbox to answers list\n #return string for question number and move to next question text\n q_num+=1\n q_act.set(questions[q_num-1])\n q_num_str.set('Question '+str(q_num)+' of '+str(len(questions)))\n\n #if the question number is in the breaks list, call do_story\n\n #get ready to ask new question\n #repackage (show) asking elements)\n q_text.pack(pady='10')\n q_input.pack(pady='20')\n q_input.config(state='normal')\n q_num_label.pack(pady='10')\n submit_button.pack()\n bottom_instructions.pack(side='bottom')",
"def main():\n expression = generate_expression()\n answer = solve_expression(expression)\n print('The math quiz is the following:\\n')\n print(expression, '\\n')\n user_answer = input('Enter your answer here: ')\n if user_answer == str(answer):\n print('Right! Well done!\\n')\n else:\n print(f'Wrong! The correct answer is {answer}\\n')",
"def trivia(self):\r\n self.write_to_chat(\"pls trivia\")\r\n r=\"pls trivia\"\r\n while \"pls trivia\" in r:\r\n r=self.read_chat()\r\n sleep(1)\r\n self.write_to_chat(choice([\"a\",\"b\",\"c\",\"d\"]))",
"def respond(user_input):\n\toverall_response = []\n\twordlist = split(' ',remove_punctuation(user_input))\n\twordlist[0] = wordlist[0].lower()\n\tmapped_wordlist = you_me_map(wordlist)\n\tmapped_wordlist[0] = mapped_wordlist[0].capitalize()\n\t\n\tglobal STATE # current state - used to get preference reasoning\n\tglobal PREFERENCE_STATE # the preference that is currently being discussed\n\tglobal HELLO_SAID # if hello has been already said or not\n\n\t# Non-Response Rule =================================\n\t# 1) User has sent nothing or has only sent punctuation\n\tif wordlist[0] == '':\n\t\toverall_response += [ random.choice(NO_INPUT_RESPONSES) ]\n\t\t# cycles through multple responses\n\telse:\n\t\t# Introduction Rules =======================================================\n\t\t# Introduction-type input of size <= 2 (\"hi there\", \"hello!\", \"what's up?\")\n\t\tif (len(wordlist) <= 2) and (wordlist[0] in INTRODUCTION_INPUTS):\n\t\t\t\n\t\t\t# 2) Memory Function: If the user has already said hello, we\n\t\t\t# already know and respond accordingly.\n\t\t\tif HELLO_SAID == True:\n\t\t\t\toverall_response += [ \"My homie, you already told me 'hello'. We ain't on that Adele stuff, we good.\" ]\n\t\t\telse:\n\t\t\t\toverall_response += [ random.choice(INTRODUCTION_RESPONSES) ]\n\t\t\t\t# cycles through multple responses\n\t\t\t# 3) Memory function: We want to know the user's name, so we ask\n\t\t\t# them if we don't know. If the name is known, it is used.\n\t\t\tif 'name' not in OPPONENT_INFO:\n\t\t\t\toverall_response += [ random.choice(NAME_ASKS)] # cycles through responses\n\t\t\t\tSTATE = \"name_asked\"\n\t\t\telse:\n\t\t\t\toverall_response += [ \"You know you smart, \" + stringify(OPPONENT_INFO['name']) +\".\"]\n\t\t\n\t\t# Name Getter Rules ======================================\n\t\t# Detects whether a user is saying their name.\n\t\t# 4) First two words are introductory.\n\t\t# Example: \"My name's Megh\" | \"Hi im megh\" | etc.\n\t\telif wordlist[0:2] in NAME_PHRASES[2]:\n\t\t\toverall_response += nameAskCheck(wordlist, 0,2)\n\t\t\twordlist = wordlist[2:]\n\t\t\n\t\t# 5) First three words are introductory.\n\t\t# Example: \"My name is Megh\" | \"Hi i am megh\" | etc.\n\t\telif wordlist[0:3] in NAME_PHRASES[3]:\n\t\t\toverall_response += nameAskCheck(wordlist, 0,3)\n\t\t\twordlist = wordlist[3:]\n\t\t\n\t\t# 6) First four words are introductory.\n\t\t# Example: \"Hi my name is Megh\"\n\t\telif wordlist[0:4] in NAME_PHRASES[4]:\n\t\t\toverall_response += nameAskCheck(wordlist, 0,4)\n\t\t\twordlist = wordlist[4:]\n\t\t\n\t\t# Preference Clarification Rules ==========================\n\t\t# This STATE occurs when the converser is being asked to\n\t\t# explain why they like something.\n\t\telif STATE == \"preference_clarification\":\n\t\t\t\n\t\t\t# 7) Memory function: Stores the reasoning for the preference.\n\t\t\tif \"because\" in wordlist:\n\t\t\t\tindex = wordlist.index(\"because\")\n\t\t\t\tpreference_reasoning = stringify(wordlist[index + 1:])\n\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\taddPositivePreferenceReasoningToOpponent(PREFERENCE_STATE, preference_reasoning)\n\t\t\t\toverall_response += [\"I gotchu, you like \" + PREFERENCE_STATE + \" \" + stringify(mapped_wordlist) + \"? We good here.\"]\n\t\t\t\tSTATE = \"\"\n\t\t\telse:\n\t\t\t\toverall_response += [\"You gotta specify. 
Why you like \" + PREFERENCE_STATE]\t\t\n\t\telse:\n\t\t\t# eliminate \"and\" if still present \n\t\t\t# so we can process next phrase\n\t\t\tif wordlist[0] in PROGRESSION_WORDS:\n\t\t\t\twordlist = wordlist[1:]\n\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\n\t\t\t# Positive Preferences Rules ======================================\n\t\t\t# 8) Checks if the first two words are preference verbs and stores\n\t\t\t# user's preferences.\n\t\t\tif (len(wordlist) > 1 and wordlist[0] == \"i\" and wordlist[1] in POSITIVE_PREFERENTIAL_VERBS)\t:\n\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\tpreference = wordlist[2]\n\t\t\t\taddPositivePreferenceKeyToOpponent(preference)\n\t\t\t\toverall_response += [\"Why do \" + stringify(mapped_wordlist) + \"?\"]\n\t\t\t\tPREFERENCE_STATE = preference\n\t\t\t\tSTATE = \"preference_clarification\"\n\t\t\telse: \n\n\t\t\t\t# General Phrase Rules ========================================\n\t\t\t\t# 9)\n\t\t\t\tif wordlist[0:3] == ['do','you','think']:\n\t\t\t\t\toptions = [\"I'm DJ Khaled, I got my own opinions but to be a Major Key you gotta answer your own. I can't help you.\",\n\t\t\t\t\t\t \t\t\"You askin' me something crazy cuz I think a lot of things. Why do you think \" + stringify(you_me_map(wordlist[3:])) + \".\" ]\n\t\t\t\t\toverall_response += [ random.choice(options) ]\n\t\t\t\t# 10)\n\t\t\t\tif wordlist[0:2] == ['i','am']:\n\t\t\t\t\toverall_response += [\"I'm all ears always fam. Why is you \" + stringify(mapped_wordlist[2:]) + '.']\n\t\t\t\t# 11)\n\t\t\t\tif wordlist[0:2] == ['i','have']:\n\t\t\t\t\toverall_response += [\"Dang, how long you had that \" + stringify(mapped_wordlist[2:]) + '...?']\n\t\t\t\t# 12)\n\t\t\t\tif wordlist[0:2] == ['i','feel']:\n\t\t\t\t\toverall_response += [\"Check it, I feel that way too homie.\"]\n\t\t\t\t# 13) Memory function: If the converser proclaims that the bot is something (\"you are\"),\n\t\t\t\t# the proclamation is stored for future user\n\t\t\t\tif wordlist[0:2] == ['you','are']:\n\t\t\t\t\texisting_personalities = getBotPersonality()\n\t\t\t\t\tif len(existing_personalities) < 1:\n\t\t\t\t\t\toverall_response += [\"My homie, I'm not sure why you'd say I'm \" + stringify(mapped_wordlist[2:]) + ', but I noted it']\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\toverall_response += [\"You know I'm \" + stringify(mapped_wordlist[2:]) + '. I\\'m also ' + random.choice(existing_personalities)]\n\t\t\t\t\taddToBotPersonality(stringify(mapped_wordlist[2:]))\n\t\t\t\t# 14)\n\t\t\t\tif verbp(wordlist[0]):\n\t\t\t\t\toverall_response += [\"Why you want me to \" + stringify(mapped_wordlist) + '?']\n\t\t\t\t# 15)\n\t\t\t\tif wordlist[0:2]==['can','you'] or wordlist[0:2]==['could','you']:\n\t\t\t\t\toverall_response += [\"Bruh, that's some talk about \" + wordlist[0] + ' ' + stringify(mapped_wordlist[2:]) + '.']\n\t\t\t\t# 16) Sentences begin with positive or negative words (yes, no)\n\t\t\t\tif wordlist[0] in NEGATIVE_WORDS or wordlist[0] in POSITIVE_PREFERENTIAL_VERBS:\n\t\t\t\t\toverall_response += [\"You feel how you feel mah homie. 
I'm wit it.\"]\n\t\t\t\t# Preference Response rules ========================================\n\t\t\t\t# if the phrase begins with \"do you like\", the first word is\n\t\t\t\t# used to determine questioning purpose and the following words\n\t\t\t\t# are used to determine subject\n\t\t\t\tif wordlist[0:2] == [\"what\", \"is\"] or wordlist[0:1] == [\"whats\"]:\n\t\t\t\t\t# if it is a personal question, resort\n\t\t\t\t\t# to personal info\n\t\t\t\t\tif 'whats' == wordlist[0]:\n\t\t\t\t\t\tstart_index = 1\n\t\t\t\t\telif 'what' == wordlist[0]:\n\t\t\t\t\t\tstart_index = 2\n\n\t\t\t\t\t# 17) Memory function: Based on what the converser is asking,\n\t\t\t\t\t# this bot may respond with its own info.\n\t\t\t\t\tif wordlist[start_index] == \"your\":\n\t\t\t\t\t\tif len(wordlist) > start_index+1:\n\t\t\t\t\t\t\tsubject = stringify(wordlist[start_index+1:])\n\t\t\t\t\t\t\tif subject in BOT_INFO.keys():\n\t\t\t\t\t\t\t\toverall_response += [ \"Yo, my \" + subject + \" is \" + BOT_INFO[subject] + \".\"]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\t\t\t\t\toverall_response += [ stringify(mapped_wordlist) + \"? Take a guess fam.\"]\n\n\t\t\t\t\t\t\tif subject in OPPONENT_INFO.keys():\n\t\t\t\t\t\t\t\toverall_response += [ \"And I know yo \" + subject + \" is \" + stringify(OPPONENT_INFO[subject])]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toverall_response += [ \"What's your \" + subject + \" fam?\"]\n\t\t\t\t\t\t\t\n\t\t\t\t\t# 18) Memory function: Based on what the converser is asking,\n\t\t\t\t\t# this bot may respond with stored info about the bot.\n\t\t\t\t\telif wordlist[start_index] == \"my\":\n\t\t\t\t\t\tif len(wordlist) > start_index+1:\n\t\t\t\t\t\t\tsubject = stringify(wordlist[start_index+1:])\n\t\t\t\t\t\t\tif subject in OPPONENT_INFO.keys():\n\t\t\t\t\t\t\t\toverall_response += [ \"Yo, yo \" + subject + \" is \" + OPPONENT_INFO[subject] + \". You already told me that tho!\"]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\t\t\t\t\toverall_response += [ stringify(mapped_wordlist) + \"? You tell me fam.\"]\n\t\t\t\t# 19)\n\t\t\t\tif wpred(wordlist[0]):\n\t\t\t\t\toverall_response += [ \"I don't answer no questions like that. 
You tell me \" + wordlist[0] +\".\"]\n\t\t\t\t# 20) Memory function: If converser is asking about Bot's preferences, preferences are created\n\t\t\t\t# and returned.\n\t\t\t\tif wordlist[0:3] == [\"do\", \"you\", \"like\"] or wordlist[0:4] == [\"what\",\"do\", \"you\", \"like\"]:\n\t\t\t\t\t# do you like ....\n\t\t\t\t\t# determine if question is directed\n\t\t\t\t\twhat_question = True if wordlist[0] == \"what\" else False\n\n\t\t\t\t\tif what_question:\n\t\t\t\t\t\tsubject = wordlist[4:] if len(wordlist) > 4 else None\n\t\t\t\t\telse:\n\t\t\t\t\t\tsubject = wordlist[3:] if len(wordlist) > 3 else None\n\n\t\t\t\t\t# determine if a subject exists:\n\t\t\t\t\tresp = \"\"\n\t\t\t\t\tif subject == None:\n\t\t\t\t\t\tif what_question:\n\t\t\t\t\t\t\toverall_response += [ \"What do I like about what?\" ]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\toverall_response += [ \"Do I like what?\" ]\n\t\t\t\t\telse:\n\t\t\t\t\t\tif what_question:\n\t\t\t\t\t\t\t# what do you like about []\n\t\t\t\t\t\t\tif subject[0] == \"about\":\n\t\t\t\t\t\t\t\tsubject = stringify(subject[1:])\n\t\t\t\t\t\t\t\taddPositivePreferenceKeyToSelf(subject)\n\t\t\t\t\t\t\t\toverall_response += [ \"What do YOU like about \" + subject + \"?\" ]\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# what do you like []??\n\t\t\t\t\t\t\t\toverall_response += [\"About what?\"]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# do you like [subject]\n\t\t\t\t\t\t\tsubject = stringify(subject)\n\t\t\t\t\t\t\taddPositivePreferenceKeyToSelf(subject)\n\t\t\t\t\t\t\toverall_response += [ \"Oh absolutely, I \" + random.choice(POSITIVE_PREFERENTIAL_VERBS) + \" \" + subject + \".\"]\n\t\t\t\t\t\t\tif opponentHasPositivePreference(subject):\n\t\t\t\t\t\t\t\toverall_response += [ \"I know you like \" + subject + \" too, right ;)?\" ]\n\t\t\t\t\t\n\t\t\t\n\t\t\t\t# 21)\n\t\t\t\tif 'major key' in wordlist:\n\t\t\t\t\toverall_response += [\"You got it with that major key talk. You mah homie now.\"]\n\t\t\t\t# 22)\n\t\t\t\telif 'da best' in wordlist:\n\t\t\t\t\toverall_response += [\"You da best, we da best, everyone's da best. We good.\"]\n\t\t\t\t# 23)\n\t\t\t\telif 'changed a lot' in wordlist:\n\t\t\t\t\toverall_response += [\"We all changed a lot. The world is changin' just like you and me. We good out here.\"]\n\n\t\t\t\tif len(overall_response) == 0:\n\t\t\t\t\toverall_response += [ punt() ]\n\t# Debug: print(' '.join(overall_response))\n\treturn ' '.join(overall_response)",
"def do_answer():\n global nextQuestion, responses\n answer = request.form['answer']\n responses.append(answer)\n nextQuestion += 1\n if nextQuestion >= len(survey.questions):\n return redirect(\"/thanks\")\n return redirect(f\"/questions/{nextQuestion}\")",
"def scene_8(self):\n print(\"Here is the riddle: \\n\")\n riddle = random.choice(riddle_list)\n riddle_num = riddle_list.index(riddle)\n print(riddle)\n response = raw_input(\"Answer:\").lower()\n \n if response == riddle_ans[riddle_num]:\n print(\n \"Congratulation! You got the right answer! However, I \",\n \"am going to eat you! You kids will be stuck here \", \n \"forever.You will never win. That is why I am a bad witch! \\n\", \n sep=\"\\n\"\n )\n time.sleep(1)\n print(\n \"Crash! The door breaks! Comes in is a voracious dragon. \", \n \"Fire coming everywhere. It came to save you, since you \", \n \"didn\\'t wake it up when it was sleeping in his cave!\",\n sep=\"\\n\",\n )\n sys.exit()\n else:\n print(\"Sorry! That is Wrong. Bye, Bye!!!!\")\n time.sleep(1)\n print(\"HELP! SOMEBODY! HEL.....!\")\n sys.exit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Response function, which holds a list of possible answers. When called, the function uses the random module to randomly choose an answer from the list and returns the choice
|
def response():
response_list = ['Yes', 'No', 'My sources point to yes', 'Maybe', 'The outcome does not look good',
"I can't say for sure", "Perhaps", "Don't count on it", "Everything is blurry... Ask again...",
"The spirits say... Yes", "The spirits say... No", "Chances are not good", "Chances are good",
"I think not", "No straight answer...", "You can count on it", "The outcome looks good",
"My sources point to... No", "I think so", "The spirits have left... Try again in a moment...",
"If I were you, I would bet on it.", "If I were you I wouldn't bet on it."]
return random.choice(response_list)
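The question() and response() functions shown above assume a replay() helper plus the random and time modules from the surrounding script; the scaffold below is a hypothetical sketch (not part of the source) of that missing glue, just enough to run the two functions end to end.

import random
import time

def replay():
    # hypothetical stand-in: offer another round, otherwise exit quietly
    again = input('Would you like to ask another question? (y/n) ')
    if again.strip().lower().startswith('y'):
        question()
    else:
        print('Until next time...')

if __name__ == '__main__':
    question()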
|
[
"def generateResponse(self, questionText):\n\n if self.randomly:\n return random.choice(self.responses)\n else:\n return self.responses[0]",
"def get_sadge_response():\n return random.choice(sadge_list)",
"def make_multiple_choice(question, choices, which=1, randomize=True, aota=False, \n nota=False, none_prob=0.2, number=None):\n\n import random\n import string\n\n AOTA = \"All of the above\"\n NOTA = \"None of the above\"\n \n # Select right answer if given\n if which > 0:\n correct = choices[which-1]\n elif which == 0:\n correct = AOTA\n else: # which == -1:\n correct = NOTA\n \n # Randomize order of options before appending 'All ...' or 'None ...'\n if randomize:\n random.shuffle(choices)\n\n # Append 'All of the above'\n if aota or which == 0 :\n choices.append(AOTA)\n\n # Append 'None of the above'\n if nota or which == -1:\n choices.append(NOTA)\n\n # Remove the correct answer with probability none_prob\n if which > 0 and nota and none_prob > 0:\n if random.random() <= none_prob:\n choices.remove(correct)\n correct = NOTA\n # if not enough options after removing the correct answer, add ALL\n if (not aota) and len(choices) <= 4:\n choices.insert(len(choices)-1, AOTA)\n \n # get correct answer\n answer = string.ascii_lowercase[choices.index(correct)]\n\n # format if number present\n if number is None:\n blank = \"\\n\"\n else:\n # format the question\n if(number > 9):\n blank = \"\\n \"\n else:\n blank = \"\\n \"\n \n question = str(number) + \". \" + question\n\n # Format the question with options \n for i, choice in enumerate(choices):\n question += blank + string.ascii_lowercase[i] + \") \" + choice\n\n return question, answer",
"def get_random_word():\n\tresponse = True\n\tword = \"\"\n\tfood = [\n\t\t'chips', 'sushi', 'boba', 'spaghetti', 'adobo', 'rice', 'hamburger',\n\t\t'cheeseburger', 'sandwich']\n\tanimals = [\n\t\t'chicken', 'pig', 'dolphin', 'giraffe', 'elephant', 'dinosaur',\n\t\t'shark', 'rhino', 'lion', 'owl', 'zebra']\n\tartists = [\n\t\t'Beyonce', 'Khalid', 'Willie Nelson', 'Sam Smith', 'Pentatonix',\n\t\t'Buddy Holly', 'Selena Gomez', 'Kendrick Lamar', 'Demi Lovato']\n\tbrands = [\n\t\t'Nike', 'Gucci', 'Chanel', 'Adidas', 'Apple', 'Vans',\n\t\t'Ralph Lauren', 'Converse', 'Louis Vuitton', 'Vera Bradley']\n\tcolleges = [\n\t\t'Texas Tech', 'University of Houston', 'NorthWestern',\n\t\t'University of Texas at Austin', 'Stanford', 'Harvard',\n\t\t'Cambridge', 'Mississippi State', 'University of North Texas']\n\twhile response is True:\n\n\t\tresponse = input(\n\t\t\t\" There are 5 categories for you to choose from.\\n\"\n\t\t\t\"(1) Food\\n\"\n\t\t\t\"(2) Animals\\n\"\n\t\t\t\"(3) Artists\\n\"\n\t\t\t\"(4) Brands\\n\"\n\t\t\t\"(5) Colleges\\n>> \")\n\n\t\tif response is \"1\":\n\t\t\tword = random.choice(food).upper()\n\t\telif response is \"2\":\n\t\t\tword = random.choice(animals).upper()\n\t\telif response is \"3\":\n\t\t\tword = random.choice(artists).upper()\n\t\telif response is \"4\":\n\t\t\tword = random.choice(brands).upper()\n\t\telif response is \"5\":\n\t\t\tword = random.choice(colleges).upper()\n\t\telse:\n\t\t\tprint(\"Please input a number 1 - 5...\")\n\n\tcorrect_answers = list(word.replace(\" \", \"\"))\n\tword_to_guess = list(len(word) * \"_\")\n\tprint(75 * '\\n')\n\n\treturn word, correct_answers, word_to_guess",
"def get_answer_to_question(question):\n answers = get_all_answers_to_question(question)\n\n cosmic_random = random.SystemRandom()\n response_type = cosmic_random.randint(0, 2)\n\n if response_type == 0:\n return answers[0].get_prophecy_text()\n\n elif response_type == 1:\n return \"According to {} the answer is \\\"{}\\\"\".format(\n answers[0].get_oracle().oracle_name,\n answers[0].get_prophecy_text()\n )\n\n elif response_type == 2:\n return \"According to {} the answer is \\\"{}\\\" and according to {} it is \\\"{}\\\"\".format(\n answers[0].get_oracle().oracle_name,\n answers[0].get_prophecy_text(),\n answers[1].get_oracle().oracle_name,\n answers[1].get_prophecy_text()\n )\n\n return answers[0].get_prophecy_text()",
"def select_question(questions: list) -> Tuple[str, str]:\n if (len(questions) == 0):\n return None\n return random.choice(questions)",
"def simulate(self, choice_function=None, save_probabilities=False, **kwargs):\n choosers, alternatives = self.calculate_model_variables()\n \n # By convention, choosers are denoted by a -1 value in the choice column\n choosers = choosers[choosers[self.choice_column] == -1]\n print \"%s agents are making a choice.\" % len(choosers)\n\n if choice_function:\n choices = choice_function(self, choosers, alternatives, **kwargs)\n else:\n choices = self.predict(choosers, alternatives, debug=True)\n \n if save_probabilities:\n if not self.sim_pdf:\n probabilities = self.calculate_probabilities(self, choosers, alternatives)\n else:\n probabilities = self.sim_pdf.reset_index().set_index('alternative_id')[0]\n orca.add_injectable('probabilities_%s_%s' % (self.name, orca.get_injectable('iter_var')),\n probabilities)\n \n return choices",
"def post_possible_answers(channel: Channel, answers: List[str]) -> float:\n random.shuffle(answers)\n\n attachments = []\n for col, answer in zip(CHOICE_COLORS, answers):\n ans_att = {'text': answer, 'color': col}\n attachments.append(ans_att)\n\n return bot.post_message(channel, '', attachments=attachments)['ts']",
"def respond(user_input):\n\toverall_response = []\n\twordlist = split(' ',remove_punctuation(user_input))\n\twordlist[0] = wordlist[0].lower()\n\tmapped_wordlist = you_me_map(wordlist)\n\tmapped_wordlist[0] = mapped_wordlist[0].capitalize()\n\t\n\tglobal STATE # current state - used to get preference reasoning\n\tglobal PREFERENCE_STATE # the preference that is currently being discussed\n\tglobal HELLO_SAID # if hello has been already said or not\n\n\t# Non-Response Rule =================================\n\t# 1) User has sent nothing or has only sent punctuation\n\tif wordlist[0] == '':\n\t\toverall_response += [ random.choice(NO_INPUT_RESPONSES) ]\n\t\t# cycles through multple responses\n\telse:\n\t\t# Introduction Rules =======================================================\n\t\t# Introduction-type input of size <= 2 (\"hi there\", \"hello!\", \"what's up?\")\n\t\tif (len(wordlist) <= 2) and (wordlist[0] in INTRODUCTION_INPUTS):\n\t\t\t\n\t\t\t# 2) Memory Function: If the user has already said hello, we\n\t\t\t# already know and respond accordingly.\n\t\t\tif HELLO_SAID == True:\n\t\t\t\toverall_response += [ \"My homie, you already told me 'hello'. We ain't on that Adele stuff, we good.\" ]\n\t\t\telse:\n\t\t\t\toverall_response += [ random.choice(INTRODUCTION_RESPONSES) ]\n\t\t\t\t# cycles through multple responses\n\t\t\t# 3) Memory function: We want to know the user's name, so we ask\n\t\t\t# them if we don't know. If the name is known, it is used.\n\t\t\tif 'name' not in OPPONENT_INFO:\n\t\t\t\toverall_response += [ random.choice(NAME_ASKS)] # cycles through responses\n\t\t\t\tSTATE = \"name_asked\"\n\t\t\telse:\n\t\t\t\toverall_response += [ \"You know you smart, \" + stringify(OPPONENT_INFO['name']) +\".\"]\n\t\t\n\t\t# Name Getter Rules ======================================\n\t\t# Detects whether a user is saying their name.\n\t\t# 4) First two words are introductory.\n\t\t# Example: \"My name's Megh\" | \"Hi im megh\" | etc.\n\t\telif wordlist[0:2] in NAME_PHRASES[2]:\n\t\t\toverall_response += nameAskCheck(wordlist, 0,2)\n\t\t\twordlist = wordlist[2:]\n\t\t\n\t\t# 5) First three words are introductory.\n\t\t# Example: \"My name is Megh\" | \"Hi i am megh\" | etc.\n\t\telif wordlist[0:3] in NAME_PHRASES[3]:\n\t\t\toverall_response += nameAskCheck(wordlist, 0,3)\n\t\t\twordlist = wordlist[3:]\n\t\t\n\t\t# 6) First four words are introductory.\n\t\t# Example: \"Hi my name is Megh\"\n\t\telif wordlist[0:4] in NAME_PHRASES[4]:\n\t\t\toverall_response += nameAskCheck(wordlist, 0,4)\n\t\t\twordlist = wordlist[4:]\n\t\t\n\t\t# Preference Clarification Rules ==========================\n\t\t# This STATE occurs when the converser is being asked to\n\t\t# explain why they like something.\n\t\telif STATE == \"preference_clarification\":\n\t\t\t\n\t\t\t# 7) Memory function: Stores the reasoning for the preference.\n\t\t\tif \"because\" in wordlist:\n\t\t\t\tindex = wordlist.index(\"because\")\n\t\t\t\tpreference_reasoning = stringify(wordlist[index + 1:])\n\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\taddPositivePreferenceReasoningToOpponent(PREFERENCE_STATE, preference_reasoning)\n\t\t\t\toverall_response += [\"I gotchu, you like \" + PREFERENCE_STATE + \" \" + stringify(mapped_wordlist) + \"? We good here.\"]\n\t\t\t\tSTATE = \"\"\n\t\t\telse:\n\t\t\t\toverall_response += [\"You gotta specify. 
Why you like \" + PREFERENCE_STATE]\t\t\n\t\telse:\n\t\t\t# eliminate \"and\" if still present \n\t\t\t# so we can process next phrase\n\t\t\tif wordlist[0] in PROGRESSION_WORDS:\n\t\t\t\twordlist = wordlist[1:]\n\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\n\t\t\t# Positive Preferences Rules ======================================\n\t\t\t# 8) Checks if the first two words are preference verbs and stores\n\t\t\t# user's preferences.\n\t\t\tif (len(wordlist) > 1 and wordlist[0] == \"i\" and wordlist[1] in POSITIVE_PREFERENTIAL_VERBS)\t:\n\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\tpreference = wordlist[2]\n\t\t\t\taddPositivePreferenceKeyToOpponent(preference)\n\t\t\t\toverall_response += [\"Why do \" + stringify(mapped_wordlist) + \"?\"]\n\t\t\t\tPREFERENCE_STATE = preference\n\t\t\t\tSTATE = \"preference_clarification\"\n\t\t\telse: \n\n\t\t\t\t# General Phrase Rules ========================================\n\t\t\t\t# 9)\n\t\t\t\tif wordlist[0:3] == ['do','you','think']:\n\t\t\t\t\toptions = [\"I'm DJ Khaled, I got my own opinions but to be a Major Key you gotta answer your own. I can't help you.\",\n\t\t\t\t\t\t \t\t\"You askin' me something crazy cuz I think a lot of things. Why do you think \" + stringify(you_me_map(wordlist[3:])) + \".\" ]\n\t\t\t\t\toverall_response += [ random.choice(options) ]\n\t\t\t\t# 10)\n\t\t\t\tif wordlist[0:2] == ['i','am']:\n\t\t\t\t\toverall_response += [\"I'm all ears always fam. Why is you \" + stringify(mapped_wordlist[2:]) + '.']\n\t\t\t\t# 11)\n\t\t\t\tif wordlist[0:2] == ['i','have']:\n\t\t\t\t\toverall_response += [\"Dang, how long you had that \" + stringify(mapped_wordlist[2:]) + '...?']\n\t\t\t\t# 12)\n\t\t\t\tif wordlist[0:2] == ['i','feel']:\n\t\t\t\t\toverall_response += [\"Check it, I feel that way too homie.\"]\n\t\t\t\t# 13) Memory function: If the converser proclaims that the bot is something (\"you are\"),\n\t\t\t\t# the proclamation is stored for future user\n\t\t\t\tif wordlist[0:2] == ['you','are']:\n\t\t\t\t\texisting_personalities = getBotPersonality()\n\t\t\t\t\tif len(existing_personalities) < 1:\n\t\t\t\t\t\toverall_response += [\"My homie, I'm not sure why you'd say I'm \" + stringify(mapped_wordlist[2:]) + ', but I noted it']\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\toverall_response += [\"You know I'm \" + stringify(mapped_wordlist[2:]) + '. I\\'m also ' + random.choice(existing_personalities)]\n\t\t\t\t\taddToBotPersonality(stringify(mapped_wordlist[2:]))\n\t\t\t\t# 14)\n\t\t\t\tif verbp(wordlist[0]):\n\t\t\t\t\toverall_response += [\"Why you want me to \" + stringify(mapped_wordlist) + '?']\n\t\t\t\t# 15)\n\t\t\t\tif wordlist[0:2]==['can','you'] or wordlist[0:2]==['could','you']:\n\t\t\t\t\toverall_response += [\"Bruh, that's some talk about \" + wordlist[0] + ' ' + stringify(mapped_wordlist[2:]) + '.']\n\t\t\t\t# 16) Sentences begin with positive or negative words (yes, no)\n\t\t\t\tif wordlist[0] in NEGATIVE_WORDS or wordlist[0] in POSITIVE_PREFERENTIAL_VERBS:\n\t\t\t\t\toverall_response += [\"You feel how you feel mah homie. 
I'm wit it.\"]\n\t\t\t\t# Preference Response rules ========================================\n\t\t\t\t# if the phrase begins with \"do you like\", the first word is\n\t\t\t\t# used to determine questioning purpose and the following words\n\t\t\t\t# are used to determine subject\n\t\t\t\tif wordlist[0:2] == [\"what\", \"is\"] or wordlist[0:1] == [\"whats\"]:\n\t\t\t\t\t# if it is a personal question, resort\n\t\t\t\t\t# to personal info\n\t\t\t\t\tif 'whats' == wordlist[0]:\n\t\t\t\t\t\tstart_index = 1\n\t\t\t\t\telif 'what' == wordlist[0]:\n\t\t\t\t\t\tstart_index = 2\n\n\t\t\t\t\t# 17) Memory function: Based on what the converser is asking,\n\t\t\t\t\t# this bot may respond with its own info.\n\t\t\t\t\tif wordlist[start_index] == \"your\":\n\t\t\t\t\t\tif len(wordlist) > start_index+1:\n\t\t\t\t\t\t\tsubject = stringify(wordlist[start_index+1:])\n\t\t\t\t\t\t\tif subject in BOT_INFO.keys():\n\t\t\t\t\t\t\t\toverall_response += [ \"Yo, my \" + subject + \" is \" + BOT_INFO[subject] + \".\"]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\t\t\t\t\toverall_response += [ stringify(mapped_wordlist) + \"? Take a guess fam.\"]\n\n\t\t\t\t\t\t\tif subject in OPPONENT_INFO.keys():\n\t\t\t\t\t\t\t\toverall_response += [ \"And I know yo \" + subject + \" is \" + stringify(OPPONENT_INFO[subject])]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toverall_response += [ \"What's your \" + subject + \" fam?\"]\n\t\t\t\t\t\t\t\n\t\t\t\t\t# 18) Memory function: Based on what the converser is asking,\n\t\t\t\t\t# this bot may respond with stored info about the bot.\n\t\t\t\t\telif wordlist[start_index] == \"my\":\n\t\t\t\t\t\tif len(wordlist) > start_index+1:\n\t\t\t\t\t\t\tsubject = stringify(wordlist[start_index+1:])\n\t\t\t\t\t\t\tif subject in OPPONENT_INFO.keys():\n\t\t\t\t\t\t\t\toverall_response += [ \"Yo, yo \" + subject + \" is \" + OPPONENT_INFO[subject] + \". You already told me that tho!\"]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\t\t\t\t\toverall_response += [ stringify(mapped_wordlist) + \"? You tell me fam.\"]\n\t\t\t\t# 19)\n\t\t\t\tif wpred(wordlist[0]):\n\t\t\t\t\toverall_response += [ \"I don't answer no questions like that. 
You tell me \" + wordlist[0] +\".\"]\n\t\t\t\t# 20) Memory function: If converser is asking about Bot's preferences, preferences are created\n\t\t\t\t# and returned.\n\t\t\t\tif wordlist[0:3] == [\"do\", \"you\", \"like\"] or wordlist[0:4] == [\"what\",\"do\", \"you\", \"like\"]:\n\t\t\t\t\t# do you like ....\n\t\t\t\t\t# determine if question is directed\n\t\t\t\t\twhat_question = True if wordlist[0] == \"what\" else False\n\n\t\t\t\t\tif what_question:\n\t\t\t\t\t\tsubject = wordlist[4:] if len(wordlist) > 4 else None\n\t\t\t\t\telse:\n\t\t\t\t\t\tsubject = wordlist[3:] if len(wordlist) > 3 else None\n\n\t\t\t\t\t# determine if a subject exists:\n\t\t\t\t\tresp = \"\"\n\t\t\t\t\tif subject == None:\n\t\t\t\t\t\tif what_question:\n\t\t\t\t\t\t\toverall_response += [ \"What do I like about what?\" ]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\toverall_response += [ \"Do I like what?\" ]\n\t\t\t\t\telse:\n\t\t\t\t\t\tif what_question:\n\t\t\t\t\t\t\t# what do you like about []\n\t\t\t\t\t\t\tif subject[0] == \"about\":\n\t\t\t\t\t\t\t\tsubject = stringify(subject[1:])\n\t\t\t\t\t\t\t\taddPositivePreferenceKeyToSelf(subject)\n\t\t\t\t\t\t\t\toverall_response += [ \"What do YOU like about \" + subject + \"?\" ]\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# what do you like []??\n\t\t\t\t\t\t\t\toverall_response += [\"About what?\"]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# do you like [subject]\n\t\t\t\t\t\t\tsubject = stringify(subject)\n\t\t\t\t\t\t\taddPositivePreferenceKeyToSelf(subject)\n\t\t\t\t\t\t\toverall_response += [ \"Oh absolutely, I \" + random.choice(POSITIVE_PREFERENTIAL_VERBS) + \" \" + subject + \".\"]\n\t\t\t\t\t\t\tif opponentHasPositivePreference(subject):\n\t\t\t\t\t\t\t\toverall_response += [ \"I know you like \" + subject + \" too, right ;)?\" ]\n\t\t\t\t\t\n\t\t\t\n\t\t\t\t# 21)\n\t\t\t\tif 'major key' in wordlist:\n\t\t\t\t\toverall_response += [\"You got it with that major key talk. You mah homie now.\"]\n\t\t\t\t# 22)\n\t\t\t\telif 'da best' in wordlist:\n\t\t\t\t\toverall_response += [\"You da best, we da best, everyone's da best. We good.\"]\n\t\t\t\t# 23)\n\t\t\t\telif 'changed a lot' in wordlist:\n\t\t\t\t\toverall_response += [\"We all changed a lot. The world is changin' just like you and me. We good out here.\"]\n\n\t\t\t\tif len(overall_response) == 0:\n\t\t\t\t\toverall_response += [ punt() ]\n\t# Debug: print(' '.join(overall_response))\n\treturn ' '.join(overall_response)",
"def next_question(): \n return random.choice(models.Question.objects(valid=True))",
"def sample_answers(y, product_set, p_idk = 0.1, p_2a = 0.3, p_3a = 0.15):\n # Get set of possible questions available in the product catalog\n question_set = set(product_set[\"PropertyDefinitionId\"].values) # faster\n \n # Get dict of (true) answers available for the target product\n quest_answer_y = algo_utils.get_answers_y(y, product_set) \n result = {}\n \n # For each question sample additional answers \n # or replace true answer by idk if necessary.\n for question in question_set:\n # Sample random number b/w 0 and 1.\n u = random.random()\n # Sample if user says idk\n if u < p_idk:\n result[question] = ['idk'] \n # Else if it is possible sample if user give additional answers.\n elif quest_answer_y[question]=='none': #if none you can't have a 2nd answer\n result[question] = [quest_answer_y[question]]\n elif quest_answer_y[question]=='idk': #if none you can't have a 2nd answer\n result[question] = [quest_answer_y[question]] \n # Giving 2 answers?\n elif u < p_idk+p_2a:\n possible = get_all_answers(question, product_set)\n sample = np.random.choice(possible, size=1)\n # If the drawn 2nd answer is the same, redraw one\n while (str(quest_answer_y[question]) in sample.astype(str)): \n sample = np.random.choice(possible, size=1)\n result[question] = np.append([quest_answer_y[question]], sample) \n # Giving 3 answers?\n elif u < p_idk+p_2a+p_3a:\n possible = get_all_answers(question, product_set)\n sample = np.random.choice(possible, size=2, replace=False)\n # If the drawn 2nd or 3rd answer is the same, redraw 2 answers\n while (str(quest_answer_y[question]) in sample.astype(str)):\n sample = np.random.choice(possible, size=2)\n result[question] = np.append([quest_answer_y[question]], sample)\n # Else keep only the true answer \n else:\n result[question] = [quest_answer_y[question]] \n return(result)",
"def randomize_answers(self):\n options = [self.answer, self.op1, self.op2, self.op3]\n new_order = randomize(options)\n random_options = {\n \"A\": new_order[0],\n \"B\": new_order[1],\n \"C\": new_order[2],\n \"D\": new_order[3]\n }\n return random_options",
"def yelp_pick(self, msg, args):\n restaurants = self._search_yelp()\n restaurant = random.choice(restaurants)\n return self.format_result_card(msg, restaurant)",
"def random_choice(self, state):\n self.__curiosity *= self.__curiosity_factor\n\n if self.__last_random_choice is None:\n choices = self.actions\n else:\n choices = self.actions + [self.__last_random_choice]\n\n if self.__curiosity <= 0.5: # Wait for the half time of e-greedy search strategy\n print \"BIAS=\"+str(self.bias[1])\n # Take random choices or positive/negative only actions\n if self.bias[1] > 0:\n choices = filter(lambda (p,a): a>=0 and p==self.bias[0], filter(lambda c: c is not None, choices))\n choices += tuple([None])\n elif self.bias[1] < 0:\n choices = filter(lambda (p,a): a<0 and p==self.bias[0], filter(lambda c: c is not None, choices))\n choices += tuple([None])\n return choice(choices)",
"def setup_response(self):\n # call secondary setup for MultipleChoice questions, to set name\n # attributes\n self.mc_setup_response()\n\n # define correct choices (after calling secondary setup)\n xml = self.xml\n cxml = xml.xpath('//*[@id=$id]//choice', id=xml.get('id'))\n\n # contextualize correct attribute and then select ones for which\n # correct = \"true\"\n self.correct_choices = [\n contextualize_text(choice.get('name'), self.context)\n for choice in cxml\n if contextualize_text(choice.get('correct'), self.context).upper() == \"TRUE\"\n ]\n\n if self.has_partial_credit:\n self.partial_choices = [\n contextualize_text(choice.get('name'), self.context)\n for choice in cxml\n if contextualize_text(choice.get('correct'), self.context).lower() == 'partial'\n ]\n self.partial_values = [\n float(choice.get('point_value', default='0.5')) # Default partial credit: 50%\n for choice in cxml\n if contextualize_text(choice.get('correct'), self.context).lower() == 'partial'\n ]",
"def accept_random(self):\r\n\r\n if not (len(self.ch_list) == 0):\r\n # accept random challenges from a list of challenges\r\n choice = random.choice(self.ch_list)\r\n if (not isinstance(choice[0], KnightErrant)) or ((isinstance(choice[0], KnightErrant) and (not choice[0].traveling))):\r\n # choice[challenger, recipient, skill]\r\n # challenge(self, recipient, skill)\r\n # self will challenge challenger choice[0]\r\n self.direct_accept_challenge(choice[0], choice[2])\r\n self.ch_list.remove(choice)",
"def generate_pat_questionnaire() -> list:\n print(\"The next few questions are intended to survey your understanding across five broad categories of the PAT\"\n \"syllabus. On a scale of 1 (least understanding) to 5 (best understanding), select your response.\")\n responses = [0] * NUM_PAT_TOPICS\n\n for i in range(NUM_PAT_TOPICS):\n while True:\n try:\n responses[i] = int(input(\"Enter an integer from 1 to 5 for\"+ PAT_TOPICS[i] + \": \"))\n if responses[i] >= 1 and responses[i] <= 5:\n break\n else:\n print(\"Please enter an integer between 1 to 5\")\n except ValueError:\n print(\"Please enter an integer number.\")\n return responses",
"def pick_random_questions(num_questions):\n print(\"=====pick_random_questions fired...\")\n shuffle(QUESTIONS)\n questions = sample(list(QUESTIONS), k=num_questions)\n\n shuffle(questions)\n return questions",
"def display_question(content):\n\n question = random.randint(0, len(content[0])-1)\n print \"\\nUnit Test:\", content[0][question], ''\n options = [random.randint(0, len(content[1])-1),\n random.randint(0, len(content[1])-1),\n random.randint(0, len(content[1])-1)]\n options[random.randint(0,2)] = question\n print '1: ', content[1][options[0]],\n print '\\n2: ', content[1][options[1]],\n print '\\n3: ', content[1][options[2]],\n\n answer = input('\\nYour choice: ')\n\n answers_list = []\n answers_list.extend([options,answer,question])\n return answers_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Confirm that xz is installed
|
def test_xz():
lexed = shlex.split("command -v xz")
proc = Popen(lexed, stdout=PIPE, stderr=PIPE, shell=True)
proc.wait()
return bool(proc.returncode == 0)
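A simpler way to perform the same availability check without spawning a shell is shutil.which from the standard library; the snippet below is an illustrative alternative, not part of the original source.

import shutil

def xz_available():
    # shutil.which returns the full path of the executable, or None if it is absent
    return shutil.which("xz") is not None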
|
[
"def test_xrt_installed(host):\n _xrt_installed_only(host)\n assert host.file(\"/opt/xilinx/xrt/bin/xbutil\").exists",
"def test_xrt_aws_installed(host):\n _aws_only(host)\n _xrt_installed_only(host)\n assert host.file(\"/opt/xilinx/xrt/bin/awssak\").exists",
"def test_haproxy_is_installed(host):\n assert host.package(\"haproxy\").is_installed",
"def test_check_azcopy_install(self):\r\n downloader.check_azcopy_install()\r\n print(\"AzCopy is successfully installed.\")",
"def test_plone_app_dexterity_installed(self):\n qi = self.portal.portal_quickinstaller\n self.assertTrue(qi.isProductInstalled('plone.app.dexterity'))",
"def testPackageInstalled(self):\n self.Patch(\n setup_common,\n \"CheckCmdOutput\",\n return_value=self.PKG_INFO_INSTALLED)\n\n self.assertTrue(setup_common.PackageInstalled(\"fake_package\"))",
"def check_gzweb(ctx):\n return os.path.exists(ctx.get_product_file('bin', 'gzweb'))",
"def test_dependencies_installed(self):\n installer = getToolByName(self.portal, 'portal_quickinstaller')\n self.assertTrue(installer.isProductInstalled('ContentWellPortlets'))",
"def __is_installed() -> bool:\n try:\n check_call(\n [\"bash\", \"-c\", \"command -v keybase\"], stdout=DEVNULL, stderr=DEVNULL\n )\n return True\n except CalledProcessError:\n return False",
"def test_product_installed(self):\n self.assertTrue(self.installer.isProductInstalled('rapido.plone'))",
"def test_product_installed(self):\n self.assertTrue(self.installer.isProductInstalled('collective.sassy'))",
"def test_check_conda_installation_pacakge_is_installed():\n pytest_enable_socket()\n\n ## Install hg19-gaps-ucsc-v1\n recipe = \"hg19-gaps-ucsc-v1\"\n args = Namespace(channel='genomics', command='install', debug=False, name=[recipe], file=[] , prefix=None, id = None)\n try:\n install.install((), args)\n except SystemExit:\n pass\n jdict = install.check_ggd_recipe(recipe,\"genomics\")\n version = jdict[\"packages\"][recipe][\"version\"]\n \n\n ## Test that it is already installed\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n install.check_conda_installation(recipe)\n assert \"SystemExit\" in str(pytest_wrapped_e.exconly()) ## test that SystemExit was raised by sys.exit() \n\n try:\n uninstall_hg19_gaps_ucsc_v1()\n except:\n pass",
"def testPackageNotInstalled(self):\n self.Patch(\n setup_common,\n \"CheckCmdOutput\",\n return_value=self.PKG_INFO_NONE_INSTALL)\n\n self.assertFalse(\n setup_common.PackageInstalled(\"fake_package\"))",
"def test_product_installed(self):\n installer = getToolByName(self.portal, 'portal_quickinstaller')\n self.assertTrue(installer.isProductInstalled('reptheory.policy'))",
"def check_pkgconfig ():\n if sys.platform == \"win32\":\n return os.system (\"pkg-config > NUL\") == 0\n else:\n return os.system (\"pkg-config 2> /dev/null\") == 256",
"def is_installed(self):\n return False",
"def test_dnsmasq_is_installed(host):\n assert host.package(\"dnsmasq\").is_installed",
"def _is_installed(self):\n return self._system.exists(os.path.join(self.get_install_path(), \"bin/root\"))",
"def is_installed(self) -> bool:\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Decompress an xz resource
|
def decompress_xz(file_name):
lexed = shlex.split("xz -d \"%s\"" % file_name)
proc = Popen(lexed, stdout=PIPE, stderr=PIPE, shell=False)
proc.wait()
return proc.returncode
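The standard-library lzma module can read .xz streams directly, so the same decompression can be done without the external xz binary; the following is an illustrative sketch (the function name and output-naming rule are assumptions), and unlike `xz -d` it leaves the compressed source file in place.

import lzma
import shutil

def decompress_xz_stdlib(file_name):
    # strip a trailing '.xz' for the output name, otherwise append '.out'
    out_name = file_name[:-3] if file_name.endswith(".xz") else file_name + ".out"
    with lzma.open(file_name, "rb") as src, open(out_name, "wb") as dst:
        shutil.copyfileobj(src, dst)
    return out_name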
|
[
"def RunDecompress(args):\n compressed_apex_fp = args.input\n decompressed_apex_fp = args.output\n return decompress(compressed_apex_fp, decompressed_apex_fp)",
"def __decompress_archive(self):\n self.decompress_path = self.cwd.joinpath(PathVariables.SRC__DECOMPRESSED)\n self.log.debug(\"decompress tar to %s: \" % self.decompress_path)\n\n self.tar_archive.extractall(self.cwd.joinpath(PathVariables.SRC__DECOMPRESSED))\n self.tar_archive.close()",
"def decompress_file(body, filename):\n if filename.endswith(\".gz\"):\n # contents = zlib.decompress(body, 16 + zlib.MAX_WBITS)\n zip_contents = BytesIO(body)\n with GzipFile(fileobj=zip_contents, mode='rb') as zip_read:\n try:\n contents = BytesIO(zip_read.read())\n except (IOError, OSError) as e:\n return None, \"Error decompressing gzip file: \" + str(e)\n zip_contents.close()\n elif filename.endswith(\".bz2\"):\n try:\n contents = BytesIO((bz2_decompress(body)))\n except IOError as e:\n return None, \"Error decompressing bz2 file: \" + str(e)\n else:\n contents = BytesIO((body))\n return contents, None",
"def unarchive():",
"def unzip_gz(url, outpath):\n\n with gzip.open(url, 'rb') as f1:\n with open('file.txt', 'wb') as f2:\n shutil.copyfileobj(f1, f2)",
"def do_compression(filename):\n query=\"laszip -i \"+filename+\" -olaz\"\n subprocess.run(query)\n os.remove(filename)",
"def decompress_string(compressed_string):\n with GzipFile(mode='rb', fileobj=BytesIO(compressed_string)) as f:\n return f.read().decode('utf-8', errors='ignore')",
"def testDecompress(self):\n decompressor = zlib_decompressor.ZlibDecompressor()\n\n compressed_data = (\n b'x\\x9c\\x0b\\xc9\\xc8,V\\x00\\xa2D\\x85\\x92\\xd4\\xe2\\x12=\\x00)\\x97\\x05$')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a test.'\n self.assertEqual(uncompressed_data, expected_uncompressed_data)\n\n decompressor = zlib_decompressor.ZlibDecompressor()\n\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')",
"def decompress_block(byte_stream):\n byte_stream = io.BytesIO(byte_stream)\n return gzip.GzipFile(fileobj=byte_stream).read()",
"def decompress(infile):\n decompressed_data = BytesIO()\n crc = 0\n while True:\n aes_header = struct.unpack('>3I', infile.read(12))\n decompressed_length = aes_header[0]\n compressed_length = aes_header[1]\n compressed_chunk = infile.read(compressed_length)\n crc = zlib.crc32(compressed_chunk, crc)\n decompressed_chunk = zlib.decompress(compressed_chunk)\n assert decompressed_length == len(decompressed_chunk)\n decompressed_data.write(decompressed_chunk)\n if aes_header[2] == 0:\n break\n decompressed_data.seek(0)\n return (decompressed_data, crc)",
"def decompress(compressed_lzma, decompressed_lzma=False):\r\n\r\n data = lzma.decompress(compressed_lzma)\r\n\r\n def unpack_bytes(data):\r\n size, = struct.unpack('<I', data[:4])\r\n data = data[4:]\r\n bs = struct.unpack(f'<{size}b', data[:size])\r\n data = data[size:]\r\n\r\n return bs, data\r\n\r\n xs, data = unpack_bytes(data)\r\n ys, data = unpack_bytes(data)\r\n zs, data = unpack_bytes(data)\r\n ws, data = unpack_bytes(data)\r\n\r\n xs = unsorted_diff_unpack_8_16(xs)\r\n ys = unsorted_diff_unpack_8_16(ys)\r\n\r\n ws = unpack_8_32(ws)\r\n ret = combine(xs, ys, zs, ws)\r\n if decompressed_lzma:\r\n return ret\r\n # format 1 is FORMAT_XZ, an implementation of lzma2, the most recent lzma\r\n # standard. However I've been told (but have not tested) by wtc that osu!\r\n # only accepts replays in format 2 (aka FORMAT_ALONE), the older lzma\r\n # standard.\r\n ret = lzma.compress(ret.encode('UTF-8'), format=2)\r\n return ret",
"def decompress(data: bytes, *, debug: bool = False) -> bytes:\n\t\n\treturn b\"\".join(decompress_stream(io.BytesIO(data), debug=debug))",
"def decompress(self):\n with open(self.in_path, \"rb\") as f, open(self.out_path, \"w\") as o, open(self.g_path, \"rb\") as g:\n self.node = pickle.load(g)\n bit_text = \"\"\n byte = f.read(1)\n while(byte != b\"\"):\n \n byte = ord(byte)\n bits = bin(byte)[2:].rjust(8, \"0\")\n bit_text += bits\n byte = f.read(1)\n \n depadded_text = self.depad_text(bit_text)\n decoded_text = self.decode_text(depadded_text)\n o.write(decoded_text)\n print(\"Decompressed\")",
"def decompress(self, input_file_path, output_file_path=None):\n data = bitarray(endian='big')\n output_buffer = []\n\n # read the input file\n try:\n with open(input_file_path, 'rb') as input_file:\n data.fromfile(input_file)\n except IOError:\n print('Could not open input file ...')\n raise\n\n while len(data) >= 24:\n # print(len(data))\n\n byte1 = ord(data[0:8].tobytes())\n byte2 = ord(data[8:16].tobytes())\n byte3 = ord(data[16:24].tobytes())\n del data[0:24]\n distance = (byte1 << 8) | byte2\n length = byte3\n\n if distance == 0:\n byte = data[0:8].tobytes()\n output_buffer.append(byte)\n del data[0:8]\n else:\n for i in range(length):\n output_buffer.append(output_buffer[-distance])\n if len(data) < 8:\n break\n byte = data[0:8].tobytes()\n output_buffer.append(byte)\n del data[0:8]\n out_data = b''.join(output_buffer)\n\n if output_file_path:\n try:\n with open(output_file_path, 'wb') as output_file:\n output_file.write(out_data)\n print('File was decompressed successfully and saved to output path ...')\n return None\n except IOError:\n print('Could not write to output file path. Please check if the path is correct ...')\n raise\n return out_data",
"def testDecompress(self):\n decompressor = zlib_decompressor.DeflateDecompressor()\n\n compressed_data = (\n b'\\x0b\\xc9\\xc8,V\\x00\\xa2D\\x85\\x92\\xd4\\xe2\\x12=\\x00)\\x97\\x05$')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a test.'\n self.assertEqual(uncompressed_data, expected_uncompressed_data)\n\n decompressor = zlib_decompressor.DeflateDecompressor()\n\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')",
"def zunpack(azip,workdir):\n try:\n azip.extractall(workdir)\n except AttributeError:\n #extractall not in the python2.5 library.\n path = \"\"\n for inf in azip.infolist():\n #Construct destination path.\n if inf.filename[0] == '/':\n path = os.path.join(workdir, inf.filename[1:])\n else:\n path = os.path.join(workdir, inf.filename)\n path = os.path.normpath(path)\n \n # Create all upper directories if necessary.\n upperdirs = os.path.dirname(path)\n if upperdirs and not os.path.exists(upperdirs):\n os.makedirs(upperdirs)\n\n if inf.filename[-1] == '/':\n #Found dir entry in zip\n try :\n os.mkdir(path)\n except OSError as e:\n #Ignore file exists error\n if e.errno != 17: raise e\n else:\n #Do save actual file\n outf = open(path,\"w\")\n outf.write(azip.read(inf.filename))\n outf.close()",
"def decompress_parsed(header_info: CompressedHeaderInfo, data: bytes, *, debug: bool = False) -> bytes:\n\t\n\treturn b\"\".join(decompress_stream_parsed(header_info, io.BytesIO(data), debug=debug))",
"def decompress(src, dest, destsize):\n src = ffi.from_buffer(src)\n dest = ffi.from_buffer(dest)\n return C.blosc_decompress(src, dest, destsize)",
"def _uncompress_file(file_, delete_archive=True):\n print 'extracting data from %s...' % file_\n data_dir = os.path.dirname(file_)\n # We first try to see if it is a zip file\n try:\n if file_.endswith('.zip'):\n z = zipfile.Zipfile(file_)\n z.extractall(data_dir)\n z.close()\n elif file_.endswith('.gz'):\n z = gzip.GzipFile(file_)\n name = os.path.splitext(file_)[0]\n f = file(name, 'w')\n z = f.write(z.read())\n elif file_.endswith('.txt'):\n pass\n else:\n tar = tarfile.open(file_, \"r\")\n tar.extractall(path=data_dir)\n tar.close()\n if delete_archive and not file_.endswith('.txt'):\n os.remove(file_)\n print ' ...done.'\n except Exception as e:\n print 'error: ', e\n raise"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return list of files in ZIP matching pattern
|
def get_files_in_zip(zip_file, pattern):
file_list = list()
lexed = shlex.split("unzip -t \"%s\" \"%s\"" % (zip_file, pattern))
proc = Popen(lexed, stdout=PIPE, stderr=PIPE, shell=False)
proc.wait()
if proc.returncode != 0:
return None
for line in proc.stdout.read().split("\n"):
if len(line) > 15 and line[0:12] == " testing:":
            formatted_line = line[13:-2].strip(' ')
            file_list.append(formatted_line)
return file_list
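
# Hedged usage sketch (editor addition, not part of the indexed snippet): this
# shells out to the external `unzip` binary, so it must be on PATH, and the
# stdout handling assumes Python 2 strings. "archive.zip" is a hypothetical name.
# files = get_files_in_zip("archive.zip", "*.txt")
# if files is None:
#     pass  # unzip exited non-zero (bad archive or no matching entries)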
|
[
"def search_zip(fname, pattern):\n matches = []\n zipf = zipfile.ZipFile(fname, 'r')\n for name in zipf.namelist():\n with zipf.open(name) as f:\n for line in f.readlines():\n if match := pattern.search(line):\n matches.append((fname, name, line, match.group().decode()))\n return matches",
"def list_files():\n return [x for x in os.listdir(DIRECTORY) if x.endswith('.zip')]",
"def get_files(dir_path, pattern=\"\"):\n if os.path.isdir(dir_path):\n archives = []\n for dirpath, dirnames, filenames in os.walk(dir_path):\n for filename in filenames:\n if re.search(pattern, filename):\n archives.append(os.path.join(dirpath, filename))\n return archives\n else:\n raise FileUtilsError(dirErrorMsg + dir_path)",
"def find_files(base, pattern):\n return [n for n in fnmatch.filter(os.listdir(base), pattern) if os.path.isfile(os.path.join(base, n))]",
"def find_files(base, pattern):\n return [n for n in fnmatch.filter(os.listdir(base), pattern) if\n os.path.isfile(os.path.join(base, n))]",
"def fetchFilesBasedonPattern(**kwargs):\n print(kwargs)\n xcom_data = kwargs[\"ti\"]\n s3_files_paths_list = xcom_data.xcom_pull(key=None, task_ids=\"list_s3_files\")\n print(s3_files_paths_list)\n if s3_files_paths_list:\n return [path for path in s3_files_paths_list if re.search(s3_file_pattern, path)]",
"def FindMatchingFiles(pattern):\n path, _ = os.path.split(pattern)\n if path == \"\":\n path = \".\" # os.listdir fails with empty path\n def match(s): return s.startswith(pattern) and s.endswith(\".h5\")\n return list(filter(match, os.listdir(path)))",
"def files_from_zip(zip_fio: fileIO):\n # Open zip file to read\n with ZipFile(zip_fio, 'r') as f:\n # Extract list of fullpath filenames\n names = f.namelist()\n for name in names:\n # Extract name and extension\n nameext = nameext_from_path(name)\n # If it's not a directory yield nameext and data\n if nameext != '':\n file = f.open(name, 'r')\n yield nameext, b_to_fio(file.read())",
"def zip(content):\n return [item for item in content if item.extension.lower() == 'zip']",
"def list_wheel(wheel_file):\n return [f.filename for f in zipfile.ZipFile(str(wheel_file)).filelist if f.filename.startswith(\"spam/\")]",
"def findallfiles(self, extension):\n\n matches = []\n localfiles = glob.glob(\"%s/local*\" % self.dirname)\n chapterfiles = glob.glob(\"%s/chapters/*tex\" % self.dirname)\n imgfiles = glob.glob(\"%s/figures/*\" % self.dirname)\n for filename in fnmatch.filter(\n localfiles + chapterfiles + imgfiles, \"*.%s\" % extension\n ):\n matches.append(filename)\n return sorted(matches)",
"def search(files, pattern):\n results = []\n if isinstance(files, str):\n with open(files, 'r') as f:\n [results.append(m) for m in re.findall(pattern, f.read())]\n elif isinstance(files, list):\n for file in files:\n with open(file, 'r') as f:\n [results.append(m) for m in re.findall(pattern, f.read())]\n return results",
"def reglob(path, regex):\n return [file for file in os.listdir(path) if re.match(regex, file)]",
"def files(folderpath, pattern=\"*\"):\n return [f for f in folderpath.glob(pattern) if f.is_file()]",
"def _index_files(path):\n with zipfile.ZipFile(path) as zf:\n names = sorted(zf.namelist())\n names = [nn for nn in names if nn.endswith(\".tif\")]\n phasefiles = []\n for name in names:\n with zf.open(name) as pt:\n fd = io.BytesIO(pt.read())\n if SingleRawOAHTif.verify(fd):\n phasefiles.append(name)\n return phasefiles",
"def find_files(pattern, base='.'):\n regex = re.compile(pattern) # 为了效率而编译了它\n matches = list()\n for root, dirs, files in os.walk(base):\n for f in files:\n if regex.match(f):\n matches.append(path.join(root, f))\n return matches",
"def recfind(sdir: str, pattern: str) -> List[str]:\n file_paths = []\n\n for root, dir_names, file_names in os.walk(sdir):\n for file_name in file_names:\n if re.match(pattern, file_name):\n file_path = os.path.join(root, file_name)\n file_paths.append(file_path)\n else:\n continue\n\n return file_paths",
"def find_files_like(datapath, pattern):\n # No need to import these at module level\n from os import listdir\n import re\n\n # Traverse file list and look for `pattern`\n filenames = []\n pattern = re.compile(pattern)\n for file in listdir(datapath):\n if pattern.search(file):\n filenames.append(file)\n\n return filenames",
"def main(filespath, namefilter=''):\n\n os.chdir(filespath)\n count = 0\n for (dirname, _dirs, files) in os.walk(filespath):\n for filename in files:\n if filename.endswith('.zip'): # scan for zip files\n filepath = os.path.join(dirname, filename)\n print('\\n', filepath, '\\n')\n source = zipfile.ZipFile(filepath, 'r') # read zip\n\n # test for bad filename char\n for afile in source.filelist:\n if namefilter:\n if namefilter in afile.filename:\n count += 1\n print(' ', afile.filename)\n else:\n count += 1\n print(' ', afile.filename)\n print('Files counted:\\n', count)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get BIP39 English wordlist
|
def get_wordlist():
with open(WORDLIST_FILE) as english:
wordlist = english.readlines()
return [word.strip() for word in wordlist]
|
[
"def get_word_list(self)->list:\n return self.word_list",
"def get_wordle_list(filename: string) -> string:\n with open(filename, \"r\", encoding=\"utf-8\") as file_handler:\n return file_handler.read()",
"def get_possible_words(self) -> list:\n raise NotImplemented",
"def get_wordlist(self):\n return [w for w in self.words]",
"def get_bag_words() -> List[bytes]:\n words = []\n cpdist = classifier._feature_probdist\n for (fname, fval) in classifier.most_informative_features(10000):\n prob = cpdist['flag', fname].prob(fval) / cpdist['no_flag', fname].prob(fval)\n if prob > 2 and len(fname) > 2:\n words.append(fname)\n return [word.encode('utf-8') for word in words]",
"def brit_spelling(file_path):\n #https://www.lexico.com/grammar/british-and-spelling\n \n f = open(file_path)\n Lines = f.readlines()\n \n b = []\n \n for l in Lines:\n w = l.split('\\t')[0].lower()\n word = w.split()\n for x in word: \n if x != '\\n':\n b.append(x)\n \n return b",
"def get_word_en(self, word):\n request = HttpRequest(dict(urllib=self._urllib))\n options = dict(search_value=word, search_type=SearchType.WORD_EN)\n entries = request.get(options)\n \n return entries",
"def getListOfWords(self):\n returnList = []\n for word in self.setOfWords:\n returnList.append(word)\n return returnList",
"def _get_word_list():\n with open(static_path(NOUN_FILE)) as file:\n nouns = file.readlines()\n\n with open(static_path(ADJECTIVE_FILE)) as file:\n adjectives = file.readlines()\n\n return nouns, adjectives",
"def model_vocabulary(self) -> List[str]:",
"def get_word_list(file_name):\n f = open(file_name, 'r')\n lines = f.readlines()\n curr_line = 0\n while lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line += 1\n lines = lines[curr_line+1:]\n #print(lines)\n wordList = []\n\n for line in lines:\n if line in string.whitespace:\n lines.remove(line)\n else:\n words = line.split()\n for word in words:\n wordList.append(word)\n\n#only uses first 10 lines of book\n\n for line in wordList[0:10]:\n index = 0\n for word in wordList:\n a = word.strip(string.punctuation)\n wordList[index] = a.lower()\n index += 1;\n return wordList",
"def load_words():\n dict_list = []\n with open(DICTIONARY, 'rt') as fin:\n for word in fin:\n dict_list.append(word.strip('\\n'))\n # print(dict_list)\n return dict_list",
"def getWordList(self, prev_word=None):\n if prev_word == None:\n prev_word = \"<S/>\"\n\n word_list = []\n if prev_word in self.class_of_words:\n word_probs = self.calc_probabilities(prev_word)\n word_list = sorted(word_probs, key=word_probs.get, reverse=True)\n # dont predict internal symbols\n word_list = filter(lambda word: word not in self.exclude, word_list)\n word_list = word_list[:MAX_PREDICTED_WORDS]\n\n if len(word_list) <= 0:\n # in case of unkown word just show most frequent\n word_list += self.most_frequent\n\n return word_list",
"def word_searcher(letters_lists, english_words_list):\n\n word_length = len(letters_lists)\n # the current full dictionary used is defined above as word_list_seven_letter_max\n # this is a trimmed version of the nltk dictionary to only include up to 7 letter words\n # special words lists for ! letter and 2 letter words are used to save time\n if word_length == 1:\n words_list = one_letter_eng_words\n elif word_length == 2:\n words_list = two_letter_eng_words\n else:\n words_list = english_words_list\n words_list = list(filter(lambda x: len(x) == word_length, words_list))\n # iteratively trim down the words_list, keeping only words matching the allowed criteria at each index\n for i in range(word_length):\n words_list = [list(filter(lambda x: x[i] == letter, words_list)) for letter in letters_lists[i]]\n words_list = [item for sub_list in words_list for item in sub_list] # flattened list\n return words_list",
"def base_text2words(text):\n words = []\n for w in jieba.cut(text):\n if len(w) > 1 and (is_chinese(w[0]) or is_english(w[0])):\n words.append(w)\n return words",
"def fetch_wordlist() -> list[tuple[str]]:\n\n logger.info(f\"Fetch wordlist from {WORDLIST_URL}\")\n\n response = httpx.get(WORDLIST_URL)\n word_list = response.text.split(\"\\n\")\n return [(word.strip(),) for word in word_list[2:]]",
"def eng_word_candidates(word, language_model):\n res_list = [x[0] for x in language_model[word].most_common()]\n return res_list",
"def get_words(str_or_file):\n return [word.lower()\n for sent in get_sents(str_or_file)\n for word in word_tokenize(sent)]",
"def get_words(document):\n\n raw = parser.from_file(document)\n docwords = raw['content'].split()\n print(docwords)\n\n lowercasewords = []\n\n for word in docwords:\n lowercasewords.append(word.lower())\n\n print(lowercasewords)\n return lowercasewords"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert binary string to hex string. If the provided binstring's length is not a multiple of 4, zero left-padding is assumed.
|
def bin2hex(binstring):
if not isinstance(binstring, basestring):
raise ValueError
#return n_bits / 8 if n_bits % 8 == 0 else (n_bits / 8) + 1
n_bits = len(binstring)
hexlen = n_bits / 4 if n_bits % 4 == 0 else (n_bits / 4) + 1
hex_str = hex(int(binstring, 2))[2:].zfill(hexlen) #remove leading 0x
    return hex_str[:-1] if hex_str.endswith('L') else hex_str  # strip trailing "L" left by Python 2 longs
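
# Hedged usage sketch (editor addition, not part of the indexed snippet); like
# the function above, it assumes Python 2 (basestring, integer division).
if __name__ == '__main__':
    assert bin2hex('1' * 8) == 'ff'     # 8 bits -> exactly 2 hex digits
    assert bin2hex('1' * 9) == '1ff'    # 9 bits -> left-padded to 3 hex digits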
|
[
"def binary_string_to_hex(self,binary_string):\n length = len(binary_string)\n if length%4 != 0:\n raise Exception('Length of binary ({}) not divisible by 4. Cannot convert to hex.'.format(length))\n else:\n hex_string = ''\n for i in range(0,length-1,4): ## beginning of each 4 bits (4bit = 1hex)\n binary = binary_string[i:i+4]\n if binary == '0000': hex_val = '0' \n if binary == '0001': hex_val = '1' \n if binary == '0010': hex_val = '2' \n if binary == '0011': hex_val = '3' \n if binary == '0100': hex_val = '4' \n if binary == '0101': hex_val = '5' \n if binary == '0110': hex_val = '6' \n if binary == '0111': hex_val = '7' \n if binary == '1000': hex_val = '8' \n if binary == '1001': hex_val = '9' \n if binary == '1010': hex_val = 'A' \n if binary == '1011': hex_val = 'B' \n if binary == '1100': hex_val = 'C' \n if binary == '1101': hex_val = 'D' \n if binary == '1110': hex_val = 'E' \n if binary == '1111': hex_val = 'F' \n\n hex_string = hex_string + hex_val\n\n return hex_string",
"def from_bin(cls, s, hexstring=False):\n result = ''\n for i in range(0, len(s), 4):\n result += hex(int(s[i:i + 4], 2))[2:]\n return result if hexstring else cls.from_hex(result)",
"def binToHex(binaryString, signed=False):\n\ttry:\n\t\tif (len(binaryString) > 0 and binaryString[0] == \"1\" and signed):\n\t\t\treturn __signedBinToHex(binaryString)\n\t\tdec = abs(binToDec(binaryString))\n\t\treturn decToHex(dec)\n\texcept:\n\t\treturn \"\"",
"def hex_string_to_binary_string(self,hex_string):\n\n # remove newlines and spaces\n hex_string = hex_string.replace(\" \",\"\")\n hex_string = hex_string.replace(\"\\n\",\"\")\n\n if self.debug:\n print(\"CCSDS packet: \",hex_string)\n print()\n\n # convert hex to binary\n hex_length = len(hex_string);\n binary_string = bin(int(hex_string, 16))[2:].zfill((hex_length)*4)\n\n return binary_string",
"def hex2bin(hexstring: str) -> str:\n return ''.join(map(int2bin, hex2int(hexstring)))",
"def str_to_hex(string):\n return string.encode().hex()",
"def string_to_binary(str):\n return bin(int(binascii.hexlify(str.encode()), 16))[2:]",
"def binary_to_hex(bnry):\n return hex(int(bnry,2))[2:]",
"def str_to_hex(string):\n\n return \":\".join(\"{:02x}\".format(ord(c)) for c in string)",
"def str_to_hex(string: str) -> str:\n return hexlify(string.encode()).decode()",
"def hexToBin(hexString):\n\tif len(hexString) == 0: return \"\"\n\tdec = hexToDec(hexString)\n\treturn decToBin(dec)",
"def bin2hex(data):\n data = re.findall(r'[0-1]{4}',''.join(data))\n return map(lambda x: '{0:X}'.format(int(x,2)) , data )",
"def hex_encode(string):\n # Python has flaws, unless i'm doing something wrong..\n return hexlify(string.encode('utf-8')).decode('utf-8')",
"def hex_to_hexstr(input_bytes):\n return helpers.hex_str(input_bytes)",
"def hex_to_binary(self, hex_str: str) -> str:\r\n denary = self.hex_to_denary(hex_str)\r\n cols = sorted([2 ** i for i in range(8)], reverse=True)\r\n binary = []\r\n for num in cols:\r\n if denary - num >= 0:\r\n binary.append(\"1\")\r\n denary -= num\r\n else:\r\n binary.append(\"0\")\r\n\r\n return \"\".join(binary)",
"def bytes_to_hex(byte_str):\n result = \"\"\n for i in byte_str:\n result += int_to_hex(i, 2)\n return result",
"def hexstr(bfid):\n rval = \"x'\" + hexstr_uq(bfid) + \"'\"\n return rval",
"def hex_to_binary(hexa):\n return bin(int(hexa, 16)).zfill(8)",
"def ascii2binary(s):\n #return bin(int.from_bytes(s.encode(), 'big'))[2:] # Doesn't account for padding\n b, buff = \"\", \"\"\n for c in s:\n buff = bin(ord(c))[2:]\n while len(buff) % 8 != 0:\n buff = \"0\" + buff\n b += buff\n return b"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute BIP39 checksum from entropy expressed as binary string
|
def checksum(entropy_binstring):
hasher = hashlib.sha256()
data = decode_binary_string(entropy_binstring)
hasher.update(data)
checksum_hex = hasher.hexdigest()
checksum_bin = hex2bin(checksum_hex)
ent = len(entropy_binstring) / ENT_MOD
return checksum_bin[0:ent]
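
# Hedged usage sketch (editor addition): ENT_MOD is assumed to be 32, the BIP39
# ratio of entropy bits per checksum bit, so 128 bits of entropy keeps the first
# 128/32 = 4 bits of SHA-256(entropy) as its checksum.
if __name__ == '__main__':
    cs = checksum('0' * 128)    # 16 zero bytes of entropy
    assert len(cs) == 4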
|
[
"def bin_checksum(s):\n return bin_sha256(bin_sha256(s))[:4]",
"def compute_checksum(bin_msg):\n assert len(bin_msg) > 0\n cksum = 0\n for b in bin_msg:\n cksum += b\n return cksum % 256",
"def Checksum(cls, string):\n # Get the last 10 bits\n c = crc32(string.encode('utf-8')) & (2 ** 10 - 1)\n return (cls.BASE32_ALPHABET[c >> cls.BASE32_BIT_WIDTH] +\n cls.BASE32_ALPHABET[c & (2 ** cls.BASE32_BIT_WIDTH - 1)])",
"def calculate_checksum(source_bytes):\n total_byte_sum = 0\n for byte in source_bytes:\n total_byte_sum += byte\n \n return 128 - (total_byte_sum % 128)",
"def calculate_checksum(self, text):\n\t\tchecksum = 0\n\t\tfor i in range(len(text)):\n\t\t\tchecksum ^= ord(text[i])\n\n\t\treturn \"%x\" % (checksum % 256)",
"def checksum(sentence):\n crc = 0\n for c in sentence:\n crc = crc ^ ord(c)\n crc = crc & 0xFF\n return crc",
"def get_checksum(data):\n return hashlib.sha1(data).hexdigest()",
"def entropy(s):\n b = bytearray.fromhex(s)\n freqs = [c / len(b) for c in Counter(b).values()]\n return -sum(f * math.log2(f) for f in freqs)",
"def test_scl_bcc_with_correct_checksum():\n assert scl.calc_bcc(b'\\x060 91 56 24859 169 11\\x03') == b'\\x12'",
"def hash_v1(string: str, nbins: int) -> int:\n output_bin: int = 7\n for char in string.encode('utf-8'):\n output_bin = (output_bin*31 + char) % nbins\n return output_bin",
"def checksum(value: str) -> str:\n return chr(65 + sum(CHECKSUM_TABLE[index % 2][ALPHANUMERICS_DICT[char]] for index, char in enumerate(value)) % 26)",
"def calculate_checksum(self):\n return binascii.crc32(self.unpack_binary(0, 0x78)) & 0xFFFFFFFF",
"def calculate_checksum(buf):\n checksum = 0\n for byte in bytearray(buf):\n checksum = (checksum + byte) & 0x0000FFFF\n\n return checksum",
"def __computeChecksum(ensemble):\n cs = 0 \n for byte in range(len(ensemble)-2):\n cs += ensemble[byte]\n return cs & 0xffff",
"def hash(bytes):\n return unpack(sha256(bytes).digest())",
"def RecomputeChecksum(packet):\n assert len(packet) == 69, \"Packet length must equal 69\"\n checksum = Checksum(packet[1:64])\n packet = packet[0:64] + chr(checksum % 256) + packet[65:]\n return packet",
"def entropy(hexstring, bits=128, raw=False):\n if not raw:\n onezero = bin(int(hexstring, 16))[2:]\n else:\n onezero = hexstring\n onezero = onezero.zfill(bits)\n assert len(onezero) == bits\n\n length = float(bits)\n prob = [onezero.count('0') / length, onezero.count('1') / length]\n entropy = -sum([p * math.log(p, 2) for p in prob])\n return entropy",
"def calculate_checksum(message):\n\n # Make sure it is a valid hex string\n if len(message) % 2 == 1:\n message = '0' + message\n\n # Get bytes\n message_bytes = bytes.fromhex(message)\n\n # The sum of all the bytes should be 0x55\n check = 0\n for byte in message_bytes:\n check = (check + byte) & 0xFF\n checksum = (0x55 - check) & 0xFF\n return '{:02x}'.format(checksum).upper()",
"def checksum(source_string):\n countTo = (int(len(source_string)/2))*2\n sum = 0\n count = 0\n\n # Handle bytes in pairs (decoding as short ints)\n loByte = 0\n hiByte = 0\n while count < countTo:\n if (sys.byteorder == \"little\"):\n loByte = source_string[count]\n hiByte = source_string[count + 1]\n else:\n loByte = source_string[count + 1]\n hiByte = source_string[count]\n try: # For Python3\n sum = sum + (hiByte * 256 + loByte)\n except: # For Python2\n sum = sum + (ord(hiByte) * 256 + ord(loByte))\n count += 2\n\n # Handle last byte if applicable (odd-number of bytes)\n # Endianness should be irrelevant in this case\n if countTo < len(source_string): # Check for odd length\n loByte = source_string[len(source_string)-1]\n try: # For Python3\n sum += loByte\n except: # For Python2\n sum += ord(loByte)\n\n sum &= 0xffffffff # Truncate sum to 32 bits (a variance from ping.c, which\n # uses signed ints, but overflow is unlikely in ping)\n\n sum = (sum >> 16) + (sum & 0xffff) # Add high 16 bits to low 16 bits\n sum += (sum >> 16) # Add carry from above (if any)\n answer = ~sum & 0xffff # Invert and truncate to 16 bits\n answer = socket.htons(answer)\n\n return answer"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Obtain indices into the wordlist from a binary string
|
def binstring2word_index(binstring):
    indices = [int(  # interpret each chunk as a binary string and convert to int
binstring[i*WORDLIST_PIECE_BITS: #take chunk of 11 bits
(i+1)*WORDLIST_PIECE_BITS],
2) for i in range(len(binstring)//WORDLIST_PIECE_BITS)]
return indices
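
# Hedged usage sketch (editor addition): WORDLIST_PIECE_BITS is assumed to be 11
# (one BIP39 word per 11 bits), so a 22-bit string splits into two indices.
if __name__ == '__main__':
    assert binstring2word_index('0' * 11 + '0' * 10 + '1') == [0, 1]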
|
[
"def get_indices(mnemonic):\n if len(mnemonic) == 0:\n raise ValueError\n return [get_index_from_word(word) for word in mnemonic.split()]",
"def words_to_indices(self, sentence):\n\t\tindices = []\n\t\tif self.bos:\n\t\t\tindices.append(2)\n\t\tfor word in sentence:\n\t\t\tif word in self.worddict:\n\t\t\t\tindices.append(self.worddict[word])\n\t\t\telse:\n\t\t\t\tindices.append(1)\n\t\tif self.eos:\n\t\t\tindices.append(3)\n\t\treturn indices",
"def sentence_to_indices(sentence, word_dict):\n return [word_dict.to_index(word) for word in sentence.split(' ')]",
"def find_all_indexes(word, letter):\n return [index for index, character in enumerate(word) if character == letter]",
"def word_indices(wordOccuranceVec):\n for idx in wordOccuranceVec.nonzero()[0]:\n for i in range(int(wordOccuranceVec[idx])):\n yield idx",
"def text2index(self, text_array, word2int):\n text2index = []\n for sentence in text_array:\n indexes = []\n for word in sentence.split(' '):\n if word in word2int:\n indexes.append(word2int.get(word))\n else:\n indexes.append(\"1\") # <unk>\n text2index.append(indexes)\n return text2index",
"def get_word_to_ind(words_list):\n return {word: words_list.index(word) for word in words_list}",
"def imdb2indices(inputs):\n X = [] # results\n word2index = imdb.get_word_index()\n word2index = {k:(v+3) for k,v in word2index.items()}\n word2index[\"<PAD>\"], word2index[\"<START>\"], word2index[\"<UNK>\"], word2index[\"<UNUSED>\"] = 0,1,2,3\n for input_ in inputs:\n X.append([])\n for word in input_:\n idx = word2index.get(word, word2index[\"<UNK>\"])\n X[-1].append(idx)\n return X",
"def words2indices(self, words):\n return [self.word2index(w) for w in words]",
"def _find_indexes(self, word: str, text: str):\n temp = re.match(r\"\\[([0-9\\-]{0,}):([0-9\\-]{0,})\\]\", word)\n if temp:\n start = int(temp.group(1)) if temp.group(1) != \"\" else 0\n end = int(temp.group(2)) if temp.group(2) != \"\" else len(text)\n start = len(text) + start if start < 0 else start\n end = len(text) + end if end < 0 else end\n return [(start, end)]\n indexes = []\n index = text.find(word)\n while index != -1:\n indexes.append((index, index + len(word)))\n index = text.find(word, index + len(word))\n return indexes",
"def vowel_indices(word):\n return [i + 1 for i, j in enumerate(word) if j.lower() in \"aeiouy\"]",
"def get_index_from_word(word, wordlist=None):\n if wordlist is None:\n wordlist = get_wordlist()\n for index, word_comp in enumerate(wordlist):\n if word_comp == word:\n return index\n raise InvalidWordError()",
"def hash_indices(self, word):\n hash_bytes = self.hash_fn(word)\n hash_int = int.from_bytes(hash_bytes, 'big')\n\n bit_size = len(hash_bytes) * 8\n qty_left = self.hash_qty\n indices = []\n while bit_size >= self.hash_size and qty_left > 0:\n index = hash_int & self.mask\n byte_idx, bit = divmod(index, 8)\n indices.append((byte_idx, 1 << bit))\n hash_int >>= self.hash_size\n bit_size -= self.hash_size\n qty_left -= 1\n\n return indices",
"def get_char_ids(self, word):\n return [self.char_to_idx[c] for c in word if c in self.char_to_idx]",
"def words_to_indices(self, words):\n assert isinstance(words, list)\n if all(isinstance(word, list) for word in words):\n return [self.words_to_indices(word) for word in words]\n assert all(isinstance(word, six.string_types) for word in words)\n if self.is_case_sensitive:\n return [self.vocabulary.get(word, self.unknown_index)\n for word in words]\n else:\n return [self.vocabulary.get(word.lower(), self.unknown_index)\n for word in words]",
"def Word2Index(self, line):\n indices = []\n for word in line:\n indices.append(self.vocabulary.index(word))\n\n return np.asarray(indices, dtype=\"int32\")",
"def text_to_indices(self, text):\n return [self.letter_index(char) for char in text if char in self]",
"def find_letter_indices(list_of_words, letter):\n # initialize the list\n list_of_indices = []\n\n # condition if none of the letters in a word match the target letter \n for word in list_of_words:\n if letter not in word:\n list_of_indices.append(None)\n\n # move through the letters in the word, and if a given letter matches the\n # target, append the index of that letter in the word to the list of indices.\n # Set i to equal the length of the word (thus ending the iteration,\n # because this function only calls the first time the letter appears).\n else:\n for i, item in enumerate(word):\n if letter == item:\n list_of_indices.append(i)\n i = len(word)\n\n return list_of_indices",
"def get_index_mappings(words):\n return {c: i for i, c in enumerate(words)}, {i: c for i, c in enumerate(words)}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Obtain 11-bit string from word index in [0, 2047]
|
def word_index2binstring(index):
if index < 0 or index > 2047:
raise WordNotDefinedAtIndexError()
return dec2bin(index, zero_padding=11)
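
# Hedged usage sketch (editor addition): assumes dec2bin(n, zero_padding=11),
# defined elsewhere in this module, returns an 11-character binary string.
if __name__ == '__main__':
    assert word_index2binstring(3) == '00000000011'    # 3 as an 11-bit string
    assert word_index2binstring(2047) == '1' * 11      # highest valid index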
|
[
"def findId(n):\n if type(n) != str:\n return None\n indexes = {}\n for i in xrange(len(n)):\n if n[i].isalpha():\n indexes[i] = n[i]\n size = len(indexes.keys())\n numPossible = 2**size\n wordBuilder = [i for i in n]\n \n for i in xrange(numPossible):\n binary = bin(i)\n # Get rid of '0b and add leading zeros'\n binary = binary[2:]\n binary = '0'*(size-len(binary)) + binary\n for key in indexes.keys():\n if binary[counter] == '1':\n wordBuilder[key] = indexes[key].upper()\n else:\n wordBuilder[key] = indexes[key].lower()\n id = tryId(''.join(wordBuilder))\n if id:\n return id\n return None",
"def binstring2word_index(binstring):\n indices = [int( #interpret chunk as binary string and covert to int\n binstring[i*WORDLIST_PIECE_BITS: #take chunk of 11 bits\n (i+1)*WORDLIST_PIECE_BITS],\n 2) for i in range(len(binstring)//WORDLIST_PIECE_BITS)]\n return indices",
"def get_word_from_index(index):\n if index < 0 or index > 2047:\n raise WordNotDefinedAtIndexError()\n return get_wordlist()[index]",
"def get_lsb (string, n):\n return str(string[-n:])",
"def base26_to_binary(word):\r\n message = ''\r\n letters = [char for char in word]\r\n for x in range(len(letters)):\r\n dec_code = ALPHABET.index(letters[x].lower())\r\n bin_code = format(dec_code, 'b')\r\n message += bin_code.zfill(5)\r\n return message",
"def make_bit_substring(N, M, i, j):\n\n N = str(N)\n M = str(M)\n\n return int(N[:len(N)-j-1] + str(M) + N[len(N)-i:])",
"def convert_bit_index(x):\n if x == 666666666:#if x is a non data value\n return 255\n x_string = str(x)\n sum = 0\n for i in range(1,6):\n if str(i) in x_string:\n sum += 2**i\n return sum",
"def word(l, h):\n return (h << 8) + l",
"def get_unsigned_character(data, index):\n result = data[index] & 0xFF\n return result",
"def char_to_bitstring(char):\n return bin(ord(char))[2:].rjust(8,\"0\")",
"def binary2chr(index):\n b = binary2bubble(index)\n if sum(b) != 1:\n return '*'\n else:\n return ind2chr(b.index(1))",
"def get_reserved_psram() -> int:",
"def int2bitstring(x, n):\n x += 2**n # support two's complement\n s = bin(x)[2:] # remove '0b' at the beginning\n s = s.rjust(n, '0')[-n:] # make string of length n\n return ''.join(s)",
"def pass_n(c_text, n):\n most_freqs = (most_freq(seq) for seq in split_n(c_text, n))\n return ''.join(chr(x ^ 32) for x in most_freqs)",
"def returnMemopsText(value):\n \n if value:\n \n wordString = value[:254]\n \n return wordString\n \n else:\n \n return value",
"def _read_bytes_as_string(keytab: str, index: int, bytes_to_read: int):\n offset = bytes_to_read * 2\n end_index = index + offset\n if end_index > len(keytab):\n return '0' # this is the same as get_bytes_number above. when we can't read, return 0\n return bytearray.fromhex(keytab[index:end_index]).decode('UTF-8')",
"def from10to32(string):\r\n \r\n res = \"\"\r\n remainder = int(string)\r\n while remainder > 31:\r\n char = AIC_TABLE[remainder%32].upper()\r\n remainder = remainder//32\r\n res = res + char\r\n res = res + AIC_TABLE[remainder].upper()\r\n res = res[::-1]\r\n return res.zfill(6)",
"def base_N_decode(text, alphabet, padto=None):\n base = len(alphabet) \n long_int = 0L\n for c in text:\n long_int = base * long_int + alphabet.index(c)\n text_out = int_to_string(long_int, padto)\n return text_out",
"def review_encode(string:str):\n encoded = [1]\n for word in string:\n if word.lower() in word_index:\n encoded.append(word_index[word.lower()])\n else:\n encoded.append(2)\n return encoded"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the BIP39 word from the English wordlist at the specified 0-based index
|
def get_word_from_index(index):
if index < 0 or index > 2047:
raise WordNotDefinedAtIndexError()
return get_wordlist()[index]
|
[
"def word_index2binstring(index):\n if index < 0 or index > 2047:\n raise WordNotDefinedAtIndexError()\n return dec2bin(index, zero_padding=11)",
"def get_word(self):\n # Todo get a list of words fron somewhere\n pass",
"def getindex(wordletter):\n newindexvalue = (alphabet.index(wordletter))\n return newindexvalue",
"def get_word(self, word_str):\n return self.words[word_str]",
"def choose_word(file_path):\r\n # Ask user for index position, a counting number.\r\n word_index = get_counting_number()\r\n # Open file and close automatically.\r\n with open(r\"%s\" % file_path, 'r') as input_file:\r\n input_str = input_file.read()\r\n # Create list from string split by spaces.\r\n words_list = input_str.split(' ')\r\n # If num of words is equal or greater than index number.\r\n if word_index <= len(words_list):\r\n # Word position is at list position, not counting zero.\r\n chosen_word = words_list[word_index - 1]\r\n else:\r\n # Word position is at calculated remainder of num of words\r\n # from index, not counting zero.\r\n chosen_word = words_list[(word_index % len(words_list)) - 1]\r\n return chosen_word",
"def lookup(self, word):",
"def _get_word_index(word_indexer, word_counter, word):\n if word_counter[word] < 1.5:\n return word_indexer.add_and_get_index(\"UNK\")\n else:\n return word_indexer.add_and_get_index(word)",
"def getIndexWord(self):\n return self.__indexKeyWord",
"def get_word():\n word = random.choice(list(english_words_lower_alpha_set))\n return word",
"def get_word(self, sol : str) -> Word:\n for w in self.words:\n if str(w) == sol:\n return w\n print(\"Error: Word not found.\")\n return None",
"def get_token(self, index):\n try:\n return self.index_to_token_dict[index]\n except KeyError:\n # TODO: Uncomment this and do not return UNK\n #print >> sys.stderr, \"Invalid index to vocabulary:\" + str(index)\n return self.unk",
"def get_embedding(word, nlp):\n token = nlp(word)\n return token.vector",
"def getWord(self, wid):\n try:\n return self._wids.get(wid)[0]\n except KeyError:\n return None",
"def get_word_by_clue(self, clue : str) -> Word:\n for w in self.words:\n if w.clue == clue:\n return w\n print(\"Error: Word not found.\")\n return None",
"def get_word_index(self, word):\n if self.contain(word):\n return self.dict[word]\n else:\n raise ValueError('Cannot find the word: {0}'.format(word))",
"def get_index_from_word(word, wordlist=None):\n if wordlist is None:\n wordlist = get_wordlist()\n for index, word_comp in enumerate(wordlist):\n if word_comp == word:\n return index\n raise InvalidWordError()",
"def get_word_from_current_frad(self, wordNum):\n\n\t\tif wordNum >= self.wordsPerFrame:\n\t\t\tprint \"wordNum out of bounds in FradStructure:get_word_in_current_frame()\"\n\t\telse:\n\t\t\treturn self.fradStructure[self.type][self.topBottom][self.row][self.column][self.minor][wordNum]",
"def _get_word_index(self, word):\n # Load vocabulary if necessary\n if not hasattr(self, 'vocabulary_'):\n self._load_vocabulary()\n\n assert hasattr(self, 'vocabulary_')\n\n if word is not None and word in self.vocabulary_:\n return self.vocabulary_[word]\n\n return -1",
"def listPosition(word):\n return word_order(word)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the 0-based index of a word in the English wordlist
|
def get_index_from_word(word, wordlist=None):
if wordlist is None:
wordlist = get_wordlist()
for index, word_comp in enumerate(wordlist):
if word_comp == word:
return index
raise InvalidWordError()
|
[
"def getindex(wordletter):\n newindexvalue = (alphabet.index(wordletter))\n return newindexvalue",
"def listPosition(word):\n return word_order(word)",
"def get_word_index(self, word):\n if self.contain(word):\n return self.dict[word]\n else:\n raise ValueError('Cannot find the word: {0}'.format(word))",
"def _get_word_index(self, word):\n # Load vocabulary if necessary\n if not hasattr(self, 'vocabulary_'):\n self._load_vocabulary()\n\n assert hasattr(self, 'vocabulary_')\n\n if word is not None and word in self.vocabulary_:\n return self.vocabulary_[word]\n\n return -1",
"def get_word_to_ind(words_list):\n return {word: words_list.index(word) for word in words_list}",
"def get_word_from_index(index):\n if index < 0 or index > 2047:\n raise WordNotDefinedAtIndexError()\n return get_wordlist()[index]",
"def _get_word_index(word_indexer, word_counter, word):\n if word_counter[word] < 1.5:\n return word_indexer.add_and_get_index(\"UNK\")\n else:\n return word_indexer.add_and_get_index(word)",
"def _find_word(words_list, search_list, start=0):\n for index, word in enumerate(words_list[start:]):\n if word in search_list:\n return index+start\n return None",
"def find(word, letter, start):\n\tindex = start\n\twhile index < len(word):\n\t\tif word[index] == letter:\n\t\t\treturn index\n\t\tindex = index + 1\n\treturn - 1",
"def word_value(word):\n\tword_value = 0\n\tfor letter in word:\n\t\tword_value += (liste_letter_value.index(letter)+1)\n\treturn word_value",
"def getIndexWord(self):\n return self.__indexKeyWord",
"def get_position_at_word_id(self, word_id):\n for index, item in enumerate(self.chineseword_set.all()):\n if item.id == word_id:\n return index\n logging.error(f'word_id not found {word_id}')\n # TODO need to handle better in case of error, ideally redirect user to start-page\n return index",
"def calc_rank(word, lst):\n for idx in range(0, len(lst)):\n if lst[idx].name == word:\n return idx + 1",
"def build_firstword_index(sentences):\n index = defaultdict(list)\n for i in range(len(sentences)):\n tokens = utils.tokenize(sentences[i])\n index[tokens[1]].append(i) #Excluding start tokens\n return index",
"def index_words_typical(text):\n result = []\n if text:\n result.append(0)\n for index, letter in enumerate(text):\n if letter == ' ':\n result.append(index+1)\n return result",
"def index_of_masked_word(sentence, bert):\n tokens = bert.tokenize(sentence)\n try:\n return tokens.index(MASK)\n except ValueError: # MASK not in sentence\n return -1",
"def listPosition(word: str):\n def perm(items, letter):\n pos = items.index(letter)\n return factorial(len(items) - 1) * pos\n\n result = 1\n sort_list = sorted(word)\n for i in range(len(word)):\n result += perm(sort_list, word[i])\n sort_list.pop(sort_list.index(word[i]))\n return result",
"def indice(elt, liste):\n for n, x in enumerate(liste):\n if x == elt:\n return n\n return -1",
"def __getindex__(self, token: str):\n if token in self.token_to_index:\n return(self.token_to_index[token])\n else:\n return(self.token_to_index[UNK])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of word indices, get the full mnemonic from the English wordlist
|
def get_mnemonic(indices):
if len(indices) == 0:
raise ValueError
return " ".join([get_word_from_index(index) for index in indices])
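
# Hedged usage sketch (editor addition): assumes WORDLIST_FILE points at the
# standard BIP39 English list, whose first entry (index 0) is "abandon".
if __name__ == '__main__':
    assert get_mnemonic([0, 0, 0]) == 'abandon abandon abandon'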
|
[
"def get_indices(mnemonic):\n if len(mnemonic) == 0:\n raise ValueError\n return [get_index_from_word(word) for word in mnemonic.split()]",
"def decode_indices(indices, vocabulary):\n\n decoded_tokens = [vocabulary[index] for index in indices]\n return \" \".join(decoded_tokens)",
"def idx2word(self, indexes, TEXT):\n bptt, bsz = indexes.shape\n \n output = [ [ 0 for i in range(bsz)] for j in range(bptt)]\n \n for timeStep in range(bptt):\n for batch in range(bsz):\n output[timeStep][batch] = TEXT.vocab.itos[indexes[timeStep, batch].cpu().long().item()]\n\n \n \n return(output)",
"def indices2words(self, indices):\n return [self.index2word(i) for i in indices]",
"def id_list_to_word(self, id_list):\n return [self.vocab_dict[word_id] if word_id != 0 else '%UNKNOWN%' for word_id in id_list]",
"def build_soundex_index(index):\n\n print('\\n\\n\\n Building SOUNDEX -\\n\\n\\n')\n\n soundex_index = {}\n for word in index:\n word_soundex = soundex(word)\n if word_soundex in soundex_index:\n soundex_index[word_soundex].append(word)\n else:\n soundex_index[word_soundex] = [word]\n\n print('\\n\\n\\n SOUNDEX built \\n\\n\\n')\n return soundex_index",
"def build_firstword_index(sentences):\n index = defaultdict(list)\n for i in range(len(sentences)):\n tokens = utils.tokenize(sentences[i])\n index[tokens[1]].append(i) #Excluding start tokens\n return index",
"def findId(n):\n if type(n) != str:\n return None\n indexes = {}\n for i in xrange(len(n)):\n if n[i].isalpha():\n indexes[i] = n[i]\n size = len(indexes.keys())\n numPossible = 2**size\n wordBuilder = [i for i in n]\n \n for i in xrange(numPossible):\n binary = bin(i)\n # Get rid of '0b and add leading zeros'\n binary = binary[2:]\n binary = '0'*(size-len(binary)) + binary\n for key in indexes.keys():\n if binary[counter] == '1':\n wordBuilder[key] = indexes[key].upper()\n else:\n wordBuilder[key] = indexes[key].lower()\n id = tryId(''.join(wordBuilder))\n if id:\n return id\n return None",
"def build_vocab(sentences):\r\n # Build vocabulary\r\n word_counts = Counter(itertools.chain(*sentences))\r\n # Mapping from index to word\r\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\r\n # Mapping from word to index\r\n\r\n return vocabulary_inv",
"def get_informative_words(nb_model):\n words = nb_model.decades[1930].keys()\n freq_not_zero = np.zeros((len(DECADES), len(words)))\n for i, dec in enumerate(DECADES):\n for j, word in enumerate(words):\n freq_not_zero[i,j] = 1.0 - nb_model.decades[dec][word][0]\n scores = np.where(freq_not_zero!=0, freq_not_zero, nb_model.dirichlet)\n scores /= np.min(scores, axis = 0)\n best_words = {}\n for i, dec in enumerate(DECADES):\n indices = np.argsort(scores[i,:])[-100:]\n best_words[dec] = [words[index] for index in list(indices)]\n return best_words",
"def imdb2indices(inputs):\n X = [] # results\n word2index = imdb.get_word_index()\n word2index = {k:(v+3) for k,v in word2index.items()}\n word2index[\"<PAD>\"], word2index[\"<START>\"], word2index[\"<UNK>\"], word2index[\"<UNUSED>\"] = 0,1,2,3\n for input_ in inputs:\n X.append([])\n for word in input_:\n idx = word2index.get(word, word2index[\"<UNK>\"])\n X[-1].append(idx)\n return X",
"def get_word_to_ind(words_list):\n return {word: words_list.index(word) for word in words_list}",
"def generate_lookup_entries(w, max_edit_distance=0):\n result = {w}\n queue = {w}\n for d in range(max_edit_distance):\n temp_queue = set()\n for word in queue:\n if len(word) > 1:\n for c in range(len(word)): # character index\n word_minus_c = word[:c] + word[c + 1:]\n if word_minus_c not in result:\n result.add(word_minus_c)\n if word_minus_c not in temp_queue:\n temp_queue.add(word_minus_c)\n queue = temp_queue\n return result",
"def get_word_from_index(index):\n if index < 0 or index > 2047:\n raise WordNotDefinedAtIndexError()\n return get_wordlist()[index]",
"def tokens_from_index_list(index_list, id2vocab):\n token_list = []\n for i in range(len(index_list)):\n if index_list[i] > len(id2vocab)-1:\n token_list.append(\"<UNK>\")\n else:\n token_list.append(id2vocab[index_list[i]])\n return token_list",
"def get_index_mappings(words):\n return {c: i for i, c in enumerate(words)}, {i: c for i, c in enumerate(words)}",
"def mnemonic_comp(comp):\n \n mapping = {\n \"0\" : \"101010\",\n \"1\" : \"111111\",\n \"-1\" : \"111010\",\n \"D\" : \"001100\",\n \"M\" : \"110000\",\n \"!D\" : \"001101\",\n \"!M\" : \"110001\",\n \"-D\" : \"001111\",\n \"-M\" : \"110011\",\n \"D+1\" : \"011111\",\n \"M+1\" : \"110111\",\n \"D-1\" : \"001110\",\n \"M-1\" : \"110010\",\n \"D+M\" : \"000010\",\n \"D-M\" : \"010011\",\n \"M-D\" : \"000111\",\n \"D&M\" : \"000000\",\n \"D|M\" : \"010101\",\n } \n return mapping[comp]",
"def gen_mnemonic(num_words:int)->str:\n if num_words < 12 or num_words > 24 or num_words%3 != 0:\n raise RuntimeError(\"Invalid word count\")\n return bip39.mnemonic_from_bytes(rng.get_random_bytes(num_words*4//3))",
"def binstring2mnemonic(entropy_bin):\n checksum_bin = checksum(entropy_bin)\n combined_bin = \"{0}{1}\".format(entropy_bin, checksum_bin)\n indices = binstring2word_index(combined_bin)\n mnemonic = get_mnemonic(indices)\n return mnemonic"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a mnemonic sentence, get the word indices for the English wordlist
|
def get_indices(mnemonic):
if len(mnemonic) == 0:
raise ValueError
return [get_index_from_word(word) for word in mnemonic.split()]
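
# Hedged usage sketch (editor addition): assumes the standard BIP39 English
# list, whose first two entries are "abandon" (0) and "ability" (1).
if __name__ == '__main__':
    assert get_indices('abandon ability') == [0, 1]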
|
[
"def words_to_indices(self, sentence):\n\t\tindices = []\n\t\tif self.bos:\n\t\t\tindices.append(2)\n\t\tfor word in sentence:\n\t\t\tif word in self.worddict:\n\t\t\t\tindices.append(self.worddict[word])\n\t\t\telse:\n\t\t\t\tindices.append(1)\n\t\tif self.eos:\n\t\t\tindices.append(3)\n\t\treturn indices",
"def sentence_to_indices(sentence, word_dict):\n return [word_dict.to_index(word) for word in sentence.split(' ')]",
"def sentences2idx(sentences, words):\n seq1 = []\n for i in sentences:\n seq1.append(getSeq(i,words))\n x1,m1 = prepare_data(seq1)\n return x1, m1",
"def sentences2idx(sentences, words):\r\n seq1 = []\r\n for sent in sentences:\r\n seq1.append(getSeq(sent,words)) # seq is a list of word indices which are in sentences\r\n x1,m1 = prepare_data(seq1)\r\n# print('x shape: {}\\nm shape: {}'.format(x1.shape, m1.shape))\r\n return x1, m1",
"def sentences_to_indices(X, word_to_index, max_len):\n m = X.shape[0] # number of training examples\n ### START CODE HERE ###\n # Initialize X_indices as a numpy matrix of zeros and the correct shape\n X_indices = np.zeros((m, max_len))\n for i in range(m): # loop over training examples\n # Loop over the words of sentence_words\n for j, w in enumerate(X[i]):\n # Set the (i,j)th entry of X_indices to the index of the correct word.\n X_indices[i, j] = word_to_index[w] + 1\n ### END CODE HERE ###\n\n return X_indices",
"def build_inverted_index(sentences):\n index = defaultdict(list)\n for i in range(len(sentences)):\n for w in utils.tokenize(sentences[i]):\n index[w].append(i)\n return index",
"def build_firstword_index(sentences):\n index = defaultdict(list)\n for i in range(len(sentences)):\n tokens = utils.tokenize(sentences[i])\n index[tokens[1]].append(i) #Excluding start tokens\n return index",
"def imdb2indices(inputs):\n X = [] # results\n word2index = imdb.get_word_index()\n word2index = {k:(v+3) for k,v in word2index.items()}\n word2index[\"<PAD>\"], word2index[\"<START>\"], word2index[\"<UNK>\"], word2index[\"<UNUSED>\"] = 0,1,2,3\n for input_ in inputs:\n X.append([])\n for word in input_:\n idx = word2index.get(word, word2index[\"<UNK>\"])\n X[-1].append(idx)\n return X",
"def sentences_to_indices(X, word_to_index, max_len):\n \n m = X.shape[0] # number of training examples\n \n # Initialize X_indices as a numpy matrix of zeros and the correct shape (1 line)\n X_indices = np.zeros((m,max_len))\n \n for i in range(m): # loop over training examples\n \n # Convert the ith training sentence in lower case and split is into words. You should get a list of words.\n sentence_words = X[i].lower().split(' ')\n # Initialize j to 0\n j = 0\n for w in sentence_words:\n # Set the (i,j)th entry of X_indices to the index of the correct word.\n if w in word_to_index.keys():\n X_indices[i, j] = word_to_index[w]\n # Increment j to j + 1\n j = j+1\n \n return X_indices",
"def get_word_to_ind(words_list):\n return {word: words_list.index(word) for word in words_list}",
"def words2indices(self, words):\n return [self.word2index(w) for w in words]",
"def word_indices(wordOccuranceVec):\n for idx in wordOccuranceVec.nonzero()[0]:\n for i in range(int(wordOccuranceVec[idx])):\n yield idx",
"def words_to_indices(self, words):\n assert isinstance(words, list)\n if all(isinstance(word, list) for word in words):\n return [self.words_to_indices(word) for word in words]\n assert all(isinstance(word, six.string_types) for word in words)\n if self.is_case_sensitive:\n return [self.vocabulary.get(word, self.unknown_index)\n for word in words]\n else:\n return [self.vocabulary.get(word.lower(), self.unknown_index)\n for word in words]",
"def find_all_indexes(word, letter):\n return [index for index, character in enumerate(word) if character == letter]",
"def text2index(self, text_array, word2int):\n text2index = []\n for sentence in text_array:\n indexes = []\n for word in sentence.split(' '):\n if word in word2int:\n indexes.append(word2int.get(word))\n else:\n indexes.append(\"1\") # <unk>\n text2index.append(indexes)\n return text2index",
"def Word2Index(self, line):\n indices = []\n for word in line:\n indices.append(self.vocabulary.index(word))\n\n return np.asarray(indices, dtype=\"int32\")",
"def get_index_mappings(words):\n return {c: i for i, c in enumerate(words)}, {i: c for i, c in enumerate(words)}",
"def find_letter_indices(list_of_words, letter):\n # initialize the list\n list_of_indices = []\n\n # condition if none of the letters in a word match the target letter \n for word in list_of_words:\n if letter not in word:\n list_of_indices.append(None)\n\n # move through the letters in the word, and if a given letter matches the\n # target, append the index of that letter in the word to the list of indices.\n # Set i to equal the length of the word (thus ending the iteration,\n # because this function only calls the first time the letter appears).\n else:\n for i, item in enumerate(word):\n if letter == item:\n list_of_indices.append(i)\n i = len(word)\n\n return list_of_indices",
"def mapWords2indices(self):\n for row in range(self.dataset.shape[0]):\n words2indices = []\n for word in self.dataset[row, 0].split():\n words2indices.append(self.word2index[word])\n \n # Append the end of the sentence token\n if self.eos_token:\n words2indices.append(self.word2index[self.eos_token])\n \n self.dataset[row, 0] = np.array(words2indices)\n \n # Map strings from target column\n if self.target_col:\n for row in range(self.dataset.shape[0]):\n words2indices = []\n \n # Insert the start of the sentence token\n if self.sos_token:\n words2indices.append(self.word2index[self.sos_token])\n \n for word in self.dataset[row, self.target_col].split():\n words2indices.append(self.word2index[word])\n\n \n # Append the end of the sentence token\n if self.eos_token:\n words2indices.append(self.word2index[self.eos_token])\n \n self.dataset[row, self.target_col] = np.array(words2indices)\n \n print('Mapped words to indices')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a complete mnemonic sentence to a binstring and verify the checksum. The returned value will not include the checksum.
|
def mnemonic2binstring(mnemonic, print_warning=True):
if mnemonic == '':
raise ValueError
binstring = ''
wordlist = get_wordlist()
for word in mnemonic.split():
index = get_index_from_word(word, wordlist=wordlist)
binstring += word_index2binstring(index)
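
    # 1.03125 == 33/32: per BIP39, each 32 bits of entropy adds 1 checksum bit,
    # so total length = ENT * 33/32 (assumed; the constant is otherwise opaque).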
if len(binstring) % 1.03125 != 0:
if print_warning:
print "WARNING: Length of decoded mnemonic inconsistent with proper length!"
ent = int(len(binstring) / 1.03125)
raw_entropy = binstring[0:ent]
checksum_val = binstring[ent:]
computed_checksum = checksum(raw_entropy)
if checksum_val != computed_checksum:
raise FailedCheckSumError()
return raw_entropy
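
# Hedged usage sketch (editor addition): round-trips entropy through
# binstring2mnemonic, which is assumed to live in the same module.
if __name__ == '__main__':
    entropy = '0' * 128
    assert mnemonic2binstring(binstring2mnemonic(entropy)) == entropy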
|
[
"def mne2bin(mnemonic):\n val = 0\n for ch in mnemonic:\n val = (val << 6) | M2B[ch]\n s = \"\"\n while val > 0:\n s += chr(val & 0xFF)\n val >>= 8\n r = \"'\"\n for ch in s:\n r += \"\\\\x%02X\" % ord(ch)\n r += \"'\"\n return r",
"def test_scl_bcc_with_correct_checksum():\n assert scl.calc_bcc(b'\\x060 91 56 24859 169 11\\x03') == b'\\x12'",
"def binstring2mnemonic(entropy_bin):\n checksum_bin = checksum(entropy_bin)\n combined_bin = \"{0}{1}\".format(entropy_bin, checksum_bin)\n indices = binstring2word_index(combined_bin)\n mnemonic = get_mnemonic(indices)\n return mnemonic",
"def bin_checksum(s):\n return bin_sha256(bin_sha256(s))[:4]",
"def compute_nmea_check_sum(self, frame, start_index=1, end_index=-5):\n checksum = 0\n for s in frame[1:-5].decode(self.ENCODING, self.UNICODE_HANDLING):\n checksum ^= ord(s)\n return checksum",
"def checksum(entropy_binstring):\n hasher = hashlib.sha256()\n data = decode_binary_string(entropy_binstring)\n hasher.update(data)\n checksum_hex = hasher.hexdigest()\n checksum_bin = hex2bin(checksum_hex)\n\n ent = len(entropy_binstring) / ENT_MOD\n return checksum_bin[0:ent]",
"def comp(self, mnemonic: str) -> str:\n if mnemonic == \"0\":\n result = \"0101010\"\n elif mnemonic == \"1\":\n result = \"0111111\"\n elif mnemonic == \"-1\":\n result = \"0111010\"\n elif mnemonic == \"D\":\n result = \"0001100\"\n elif mnemonic == \"A\":\n result = \"0110000\"\n elif mnemonic == \"M\":\n result = \"1110000\"\n elif mnemonic == \"!D\":\n result = \"0001101\"\n elif mnemonic == \"!A\":\n result = \"0110001\"\n elif mnemonic == \"!M\":\n result = \"1110001\"\n elif mnemonic == \"-D\":\n result = \"0001111\"\n elif mnemonic == \"-A\":\n result = \"0110011\"\n elif mnemonic == \"-M\":\n result = \"1110011\"\n elif mnemonic == \"D+1\":\n result = \"0011111\"\n elif mnemonic == \"A+1\":\n result = \"0110111\"\n elif mnemonic == \"M+1\":\n result = \"1110111\"\n elif mnemonic == \"D-1\":\n result = \"0001110\"\n elif mnemonic == \"A-1\":\n result = \"0110010\"\n elif mnemonic == \"M-1\":\n result = \"1110010\"\n elif mnemonic == \"D+A\":\n result = \"0000010\"\n elif mnemonic == \"D+M\":\n result = \"1000010\"\n elif mnemonic == \"D-A\":\n result = \"0010011\"\n elif mnemonic == \"D-M\":\n result = \"1010011\"\n elif mnemonic == \"A-D\":\n result = \"0000111\"\n elif mnemonic == \"M-D\":\n result = \"1000111\"\n elif mnemonic == \"D&A\":\n result = \"0000000\"\n elif mnemonic == \"D&M\":\n result = \"1000000\"\n elif mnemonic == \"D|A\":\n result = \"0010101\"\n else:\n result = \"1010101\"\n return result",
"def digest_converter(self, digest):\r\n binary = bin(int(digest, 16))[2:].zfill(len(digest * 4))\r\n return binary",
"def as_bin_str(self):\n return \"\".join(format(b, \"0>8b\") for b in six.iterbytes(self.key))",
"def test_checksum(self):",
"def get_checksum(self):\n\n return self.two_letter_boxes * self.three_letter_boxes",
"def test__checksum(self):\n # Test\n result = converter._checksum(1, 2, 3)\n expected = ('''\\\n3c9909afec25354d551dae21590bb26e38d53f2173b8d3dc3eee4c047e7ab1c1eb8b85103e3be7\\\nba613b31bb5c9c36214dc9f14a42fd7a2fdb84856bca5c44c2''')\n self.assertEqual(result, expected)",
"def bin_format(self) -> str:",
"def testHex2Bin(self):\n\n self.assertEqual(hex2Bin(0xa), '0b1010')",
"def checksum (self, label, value):\n\t\tsum = 32\n\t\tfor c in label + value: sum += ord(c)\n\t\treturn chr((sum & 63) + 32)",
"def to_binary(self):\n if self.size not in VAR_PREFIXES:\n return \"0\" * (self.size - len(bin(self.value)[2:])) + bin(self.value)[2:]",
"def get_checksum(data):\n return hashlib.sha1(data).hexdigest()",
"def calculate_nmea_checksum(nmea_line):\n # \n # xor all characters in the message to get a one byte checksum.\n # don't include starting '$' or trailing checksum '*##'\n #\n return reduce(operator.xor, map(ord, nmea_line[1:-3]), 0)",
"def mnemonic_to_seed(mnemonic):\n\n if not validate_mnemonic(mnemonic):\n raise ValueError(\"Mnemonic is not valid\")\n\n words = mnemonic.lower().split()\n seed_hexstring = ''\n y = 0\n for x in range(16):\n # TODO: Use a look up to improve efficiency\n n = format(wordlist.index(words[y]), '012b') + format(wordlist.index(words[y + 1]), '012b')\n seed_hexstring += chr(int(n[:8], 2)) + chr(int(n[8:16], 2)) + chr(int(n[16:], 2))\n y += 2\n return seed_hexstring"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert raw entropy as a binary string (sans checksum) to a BIP39 mnemonic
|
def binstring2mnemonic(entropy_bin):
checksum_bin = checksum(entropy_bin)
combined_bin = "{0}{1}".format(entropy_bin, checksum_bin)
indices = binstring2word_index(combined_bin)
mnemonic = get_mnemonic(indices)
return mnemonic
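
# Hedged usage sketch (editor addition): 128 bits of entropy plus a 4-bit
# checksum is 132 bits, i.e. twelve 11-bit indices and a 12-word mnemonic.
if __name__ == '__main__':
    mnemonic = binstring2mnemonic('0' * 128)
    assert len(mnemonic.split()) == 12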
|
[
"def mnemonic2binstring(mnemonic, print_warning=True):\n if mnemonic == '':\n raise ValueError\n binstring = ''\n wordlist = get_wordlist()\n for word in mnemonic.split():\n index = get_index_from_word(word, wordlist=wordlist)\n binstring += word_index2binstring(index)\n\n if len(binstring) % 1.03125 != 0:\n if print_warning:\n print \"WARNING: Length of decoded mnemonic inconsistent with proper length!\"\n\n ent = int(len(binstring) / 1.03125)\n raw_entropy = binstring[0:ent]\n checksum_val = binstring[ent:]\n computed_checksum = checksum(raw_entropy)\n if checksum_val != computed_checksum:\n raise FailedCheckSumError()\n\n return raw_entropy",
"def mne2bin(mnemonic):\n val = 0\n for ch in mnemonic:\n val = (val << 6) | M2B[ch]\n s = \"\"\n while val > 0:\n s += chr(val & 0xFF)\n val >>= 8\n r = \"'\"\n for ch in s:\n r += \"\\\\x%02X\" % ord(ch)\n r += \"'\"\n return r",
"def mnemonic_to_seed(mnemonic):\n\n if not validate_mnemonic(mnemonic):\n raise ValueError(\"Mnemonic is not valid\")\n\n words = mnemonic.lower().split()\n seed_hexstring = ''\n y = 0\n for x in range(16):\n # TODO: Use a look up to improve efficiency\n n = format(wordlist.index(words[y]), '012b') + format(wordlist.index(words[y + 1]), '012b')\n seed_hexstring += chr(int(n[:8], 2)) + chr(int(n[8:16], 2)) + chr(int(n[16:], 2))\n y += 2\n return seed_hexstring",
"def base26_to_binary(word):\r\n message = ''\r\n letters = [char for char in word]\r\n for x in range(len(letters)):\r\n dec_code = ALPHABET.index(letters[x].lower())\r\n bin_code = format(dec_code, 'b')\r\n message += bin_code.zfill(5)\r\n return message",
"def string_to_binary(str):\n return bin(int(binascii.hexlify(str.encode()), 16))[2:]",
"def bin_to_64(BinaryLit):\n return base64.b64encode(BinaryLit).decode('ascii')",
"def human_bytes(byte_string):\n return base64.b32encode(byte_string).strip('=').lower()",
"def ascii2binary(s):\n #return bin(int.from_bytes(s.encode(), 'big'))[2:] # Doesn't account for padding\n b, buff = \"\", \"\"\n for c in s:\n buff = bin(ord(c))[2:]\n while len(buff) % 8 != 0:\n buff = \"0\" + buff\n b += buff\n return b",
"def unpibble32(text: str) -> bytes:\n encoded: bytes = bytes(text, \"ascii\")\n table: bytes = bytes.maketrans(\n b\"0123456789bcdfghjklmnopqrstvwxyz\",\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567\",\n )\n return base64.b32decode(encoded.translate(table))",
"def encode_to_b16(inp: str) -> bytes:\n encoded = inp.encode(\"utf-8\") # encoded the input (we need a bytes like object)\n b16encoded = base64.b16encode(encoded) # b16encoded the encoded string\n return b16encoded",
"def as_bin_str(self):\n return \"\".join(format(b, \"0>8b\") for b in six.iterbytes(self.key))",
"def ascii2bin(asciistring: str) -> str:\n return ''.join(map(int2bin, ascii2int(asciistring)))",
"def encode_string(S):\n if S != '':\n S = '{0:b}'.format(int(hexlify(S), 16))\n while (len(S) % 8) != 0:\n S = '0' + S\n if (len(S) >= 0) and (len(S) < 2040):\n U = left_encode(len(S)) + S\n return U\n else:\n print ('Invalid bit string (encode_string)')",
"def convert_binary_data(tree, input_string):\n\n\tbinary_string = '' #string of binary characters to be written to compressed file\n\tfor char in input_string: \n\t\tbinary_string += tree[char] #for each character append corresponding huffman code to binary_string\n\n\tbinary_tree = encoded_huffman_tree(tree) #generate the encoded huffman tree (in binary)\n\tbinary_string = binary_tree\t+ binary_string #add this infront of the data so that it can be regerated\n\n\tno_padding_bits_dec = (8-((len(binary_string)+3)%8))%8 #data stored in bytes so add calculate number of padding bits needed\n\tno_padding_bits_bin = \"{:03b}\".format(no_padding_bits_dec) #max number of padding bits can be 7 so store this in 3 bits \n\n\tbinary_string = no_padding_bits_bin + binary_string + (no_padding_bits_dec*'0') # add the number of padding bits, data, padding bits\n\n\tbinary_string = BitArray(bin=binary_string) #turn into byte array that can be written to .bin file\n\n\treturn binary_string",
"def pibble32(data: bytes) -> str:\n table: bytes = bytes.maketrans(\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567\",\n b\"0123456789bcdfghjklmnopqrstvwxyz\",\n )\n encoded: bytes = base64.b32encode(data)\n return str(encoded.translate(table), \"ascii\")",
"def char_to_bitstring(char):\n return bin(ord(char))[2:].rjust(8,\"0\")",
"def base32Decode(String):\r\n \"\"\"Here we go from 8 5-bit bytes to 5 8-bit bytes...\"\"\"\r\n \"\"\"Initialize Local Varioables...\"\"\"\r\n Output = \"\"\r\n \"\"\"Validate the parameter...\"\"\"\r\n Success = type(String) == str\r\n if Success:\r\n \"\"\"Initialize Local Variables...\"\"\"\r\n Alphabet = baseScheme(32)\r\n WordSize = 8 # 8 5-bit words...\r\n Length = len(String)\r\n\r\n Word = \"\"\r\n WordStart = WordLength = Nibble = Bits = 0\r\n Byte1 = Byte2 = Byte3 = Byte4 = Byte5 = 0\r\n\r\n for WordStart in range(0, Length, WordSize):\r\n\r\n Word = String[WordStart:WordStart + WordSize]\r\n WordLength = len(Word)\r\n Byte1 = Byte2 = Byte3 = Byte4 = Byte5 = 0\r\n\r\n for Nibble in range(WordLength):\r\n Bits = Alphabet.find(Word[Nibble])\r\n if Nibble == 0:\r\n \"\"\"11111 << 3 = 11111000\"\"\"\r\n Byte1 = Bits << 3\r\n elif Nibble == 1:\r\n \"\"\"11100 >> 2 = 00000111\"\"\"\r\n Byte1 = Byte1 + (Bits >> 2)\r\n Output = Output + chr(Byte1)\r\n \"\"\"00011 & 2**2-1 << 6 = 11000000\"\"\"\r\n Byte2 = (Bits & (2**2-1)) << 6\r\n elif Nibble == 2:\r\n \"\"\"11111 << 1 = 00111110\"\"\"\r\n Byte2 = Byte2 + (Bits << 1)\r\n elif Nibble == 3:\r\n \"\"\"10000 >> 4 = 00000001\"\"\"\r\n Byte2 = Byte2 + (Bits >> 4)\r\n Output = Output + chr(Byte2)\r\n \"\"\"01111 & 2**4-1 << 4 = 11110000\"\"\"\r\n Byte3 = (Bits & (2**4-1)) << 4\r\n elif Nibble == 4:\r\n \"\"\"11110 >> 1 = 00001111\"\"\"\r\n Byte3 = Byte3 + (Bits >> 1)\r\n Output = Output + chr(Byte3)\r\n \"\"\"00001 & 2**1-1 << 7 = 10000000\"\"\"\r\n Byte4 = (Bits & (2**1-1)) << 7\r\n elif Nibble == 5:\r\n \"\"\"11111 << 2 = 01111100\"\"\"\r\n Byte4 = Byte4 + (Bits << 2)\r\n elif Nibble == 6:\r\n \"\"\"11000 >> 3 = 00000011\"\"\"\r\n Byte4 = Byte4 + (Bits >> 3)\r\n Output = Output + chr(Byte4)\r\n \"\"\"00111 & 2**3-1 << 5 = 11100000\"\"\"\r\n Byte5 = (Bits & (2**3-1)) << 5\r\n elif Nibble == 7:\r\n \"\"\"11111 = 00011111\"\"\"\r\n Byte5 = Byte5 + Bits\r\n Output = Output + chr(Byte5)\r\n return Output",
"def encoded_huffman_tree(tree):\n\n\tbinary_string = '' #huffman tree in binary form stored as string\n\tno_keys = 0 #count number of item in huffman tree, needed for decompression\n\tfor item in tree:\n\t\tkey = [bin(ord(x))[2:].zfill(16) for x in item][0] #convert each key into 16 bit ascii\n\t\tno_bits = \"{:08b}\".format(len(tree[item])) #convert the number of bits used for each huffman code to binary\n\t\tcode = tree[item] #get huffman code\n\t\tno_keys +=1\n\t\tbinary_string += key+no_bits+code #item in tree is stored as | key | length of code | code | \n\n\tno_keys = \"{:08b}\".format(no_keys) #number of items in huffman tree in binary form\n\n\tbinary_string = no_keys+binary_string \n\n\treturn binary_string",
"def binary2ascii(b):\n #n = int(f\"0b{s}\", 2)\n #return n.to_bytes((n.bit_length() + 7) // 8, 'big').decode() # Doesn't account for padding\n s, buff, i = \"\", \"\", 0\n for char in b:\n buff += char\n i += 1\n if i % 8 == 0:\n s += chr(int(buff, 2))\n buff = \"\"\n return s"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
take the array of numbers stored in arr and return the string true if any combination of numbers in the array can be added up to equal the largest number
|
def array_addition(lst):
    import itertools  # needed for permutations below
greatest = max(lst)
sorted_nums = sorted(lst)
without_greatest = sorted_nums[:-1]
total_sums = []
idx = 1
    while idx <= len(without_greatest):  # include the subset that uses every remaining number
perms = list(itertools.permutations(without_greatest, idx))
for perm in perms:
if sum(perm) == greatest:
return True
idx += 1
return False
|
[
"def descending(array):\n for index in range(array.size() - 1):\n if arr[index] <= arr[index + 1]:\n return False\n\n return True",
"def _comb_long(c, nmax):\n if nmax == 0:\n return []\n c = np.asanyarray(c)\n return np.concatenate([c >= o + 1 for o in range(nmax)])",
"def check(array):\n val = [1,2,3,4,5,6,7,8,9]\n array.sort()\n if val == array:\n res = True\n else:\n res = False\n \n return res",
"def MaximalSquare(strArr):\n # code goes here\n # opt 1\n # rows = len(strArr)\n # columns = len(strArr[0]) if rows > 0 else 0\n\n # dp = [[0 for j in range(columns)] for i in range(rows)]\n # maxlen = 0\n\n # for i in range(rows):\n # for j in range(columns):\n # if i == 0 or j == 0:\n # dp[i][j] = int(strArr[i][j])\n # if i > 0 and j > 0 and int(strArr[i][j]) == 1:\n # dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1\n # maxlen = max(dp[i][j], maxlen)\n\n # return maxlen * maxlen\n # opt 2\n rows = len(strArr)\n columns = len(strArr[0]) if rows > 0 else 0\n dp = [0 for j in range(columns)]\n maxlen = 0\n prev = 0\n\n for i in range(rows):\n for j in range(columns):\n temp = dp[j]\n if i > 0 and j > 0 and int(strArr[i][j]) == 1:\n dp[j] = min(dp[j], dp[j-1], prev) + 1\n maxlen = max(dp[j], maxlen)\n else:\n dp[j] = int(strArr[i][j])\n prev = temp\n\n return maxlen * maxlen",
"def is_incremental(nums: np.ndarray) -> bool:\n for i in range(1, len(nums)):\n if nums[i] <= nums[i-1]:\n return False\n return True",
"def maxNumberOfApples(self, arr):\r\n arr.sort()\r\n apples = units = 0\r\n for _, weight in enumerate(arr):\r\n units += weight\r\n if units > 5000:\r\n break\r\n apples += 1\r\n return apples",
"def solution(digits):\n biggest = [0]*5\n for i, _ in enumerate(digits[:-4]):\n prev = False\n for j in range(5):\n if prev or int(digits[i+j]) >= biggest[j]:\n if int(digits[i+j]) > biggest[j]:\n prev = True\n biggest[j] = int(digits[i + j])\n else:\n break\n return int(''.join(str(x) for x in biggest))",
"def list_if_few(arr, nmax=10):\n if len(arr) <= nmax:\n s = ', '.join(arr)\n else:\n s = ''\n \n return s",
"def test_maximum_case(num_test, array, expected):\n if maximum(array) == expected:\n print(\"Test\", num_test, \"OK\")\n return\n\n print(\"Test\", num_test, \"FAIL\")",
"def simple_linear_solution(arr):\n hash_table = set(arr)\n minimum = 0\n\n while minimum in hash_table:\n minimum += 1\n\n return minimum",
"def Solution8():\n numbers = (int(c) for c in \"\".join(Data.data8.strip().splitlines()))\n return max(product(nums) for nums in traverse(numbers, 13, 1))",
"def descending(i):\n largest = 10\n for c in str(i):\n x = int(c)\n if x >= largest:\n return False\n else:\n largest = x\n return True",
"def largest_element(arr: List[int]) -> int:\n return(max(arr))",
"def sum3_in_list(arr: list, num: int) -> bool:\n found = False\n while not found:\n for i in range(0, len(arr) - 2):\n for j in range(i + 1, len(arr) - 1):\n for k in range(j + 1, len(arr)):\n if (arr[i] + arr[j] + arr[k]) == num:\n found = True\n return found",
"def max_subarray_brute_1(array):\n n = len(array)\n max_sum = -float(\"inf\")\n for i in range(n):\n for j in range(i, n):\n curr_sum = sum(array[i:j + 1])\n max_sum = max(max_sum, curr_sum)\n return max_sum",
"def maximumSubArray(arr):\n\tn = len(arr)\n\tmax_val = [-float('inf') for i in range(n)]\n\tmax_val[0] = arr[0]\n\tfor i in range(1, n):\n\t\tmax_val[i] = max(max_val[i-1]+arr[i], arr[i])\n\treturn max_val[n-1]",
"def greedy(self, nums):\n if not nums: return False\n L = len(nums)\n end = L - 1\n for i in range(L - 2, -1, -1):\n if nums[i] + i >= end:\n end = i\n return end == 0",
"def max_subarray_brute_3(array):\n n = len(array)\n # creating array of prefix sums\n prefix_sums = [0] * n\n prefix_sums[0] = array[0]\n for i in range(1, n):\n prefix_sums[i] = prefix_sums[i - 1] + array[i]\n # for convinience, when checking sum of all elements before first element\n prefix_sums.append(0)\n\n # looking for the maximum sum\n max_sum = -float(\"inf\")\n a, b = 0, 0 # start and end indices of max subarray\n for i in range(n):\n for j in range(i, n):\n curr_sum = prefix_sums[j] - prefix_sums[i - 1]\n if curr_sum > max_sum:\n max_sum = curr_sum\n a, b = i, j\n return max_sum, array[a:b + 1]",
"def max_pairwise_product(numbers):\n sorted_numbers = sorted(map(int, numbers.split()))\n num_len = len(sorted_numbers)\n return sorted_numbers[num_len - 1] * sorted_numbers[num_len - 2]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Saves and loads results of function to json.
|
def save_load_results_to_json(func, print=True):
def wrapper(filename, *args, **kwargs):
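        # Return cached results from disk when the file already exists; otherwise call func and cache its output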
full_path = os.path.join(_path, filename)
if os.path.exists(full_path):
if print:
logging.info("Loading results for %s from %s." % (func.__name__, filename))
with open(full_path, "r") as f:
return json.load(f)
obj = func(*args, **kwargs)
with open(full_path, "w") as f:
if print:
logging.info("Saving results for %s from %s." % (func.__name__, filename))
json.dump(obj, f)
return obj
return wrapper
|
[
"def saveResults(self):\n fname = join(self.seriesOutputDir, 'results.json')\n with open(fname, 'w') as outputFile:\n outputFile.write(json.dumps(self.results))",
"def save_rms_data(filename, results):\n with open(filename, \"w\") as f:\n json.dump(results, f)",
"def __export_to_json(self):\n\n self.__create_export_dir()\n\n class NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n dumped = json.dumps(self.results, cls=NumpyEncoder)\n\n with open(self.__generate_export_name(\"json\"), \"w\") as outFile:\n json.dump(dumped, outFile)\n logger.info(\"Export to JSON completed!\")",
"def store_result(result: dict, filepath: str):\n\n WebserverMapProcessor.store_json_convertible_result(result, filepath)",
"def save(self):\r\n try:\r\n with open(self.json_name(), \"w\") as json_file:\r\n json_str = dumps(self.values)\r\n json_file.write(json_str)\r\n except:\r\n print(\"Error: Writing data to file failed\")",
"def to_json(results, out_filename):\n # Generate (yield) all the results before exporting to JSON\n results = list(results)\n\n with smart_open(out_filename) as out_file:\n json_content = utils.json_for(results)\n\n out_file.write(json_content + \"\\n\")\n\n if out_file is not sys.stdout:\n logging.warning(\"Wrote results to %s.\", out_filename)",
"def writejson(self):\t\t\n\t\twith open(self.filename, 'w+') as outfile:\n\t\t\tjson.dump(self.cache, outfile, sort_keys=True, indent=4)",
"def code_to_json(fn):\n data = code_to_dict(fn)\n data = code_encode_json_hook(data)\n return json.dumps(data)",
"def write_results(self,results_dict):",
"def _export_runs(instdir, benchmark_name, implementation_name, filename): \n run_dicts = get_run_dicts(benchmark_name, implementation_name, instdir)\n with open(filename, \"w\") as fp:\n ret = fp.write(json.dumps(run_dicts))\n return ret",
"def save_json(data, file_name: str = 'hsweep'):\n # if results directory does not exist, create it!\n results_path = check_results_path()\n\n file_path = results_path / Path(file_name + '.json')\n\n with open(file_path, 'w') as f:\n json.dump(data, f)",
"def save_results(mean_gbps, stddev_gbps):\n filename = args.traffic.split('/')[-1]\n outfile = OUTDIR + filename\n\n if os.path.isfile(outfile):\n # Append to existing json; don't clobber\n with open(outfile, 'r') as f:\n results = json.load(f)\n\n os.remove(outfile)\n else:\n results = {}\n\n results['%s_mean_gbps' % args.algorithm] = mean_gbps\n results['%s_stddev_gbps' % args.algorithm] = stddev_gbps\n\n with open(outfile, 'w') as f:\n json.dump(results, f)\n\n print 'Results saved to %s!' % outfile",
"def store_result(self, url: str, result: Result) -> None:\n self.results[url] = result\n\n with open(self.result, 'w') as result_file:\n output = {}\n\n for url, result in self.results.items():\n output[url] = result.json\n\n json.dump(output, result_file)",
"def savejson(self, cache):\n\n contents = json.dumps(cache)\n self.cachepath.write_text(contents)",
"def serialize(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n return json.dumps(func(*args, **kwargs))\n return wrapper",
"def write_json_results(all_test_results):\n all_results = []\n for name, res in all_test_results.items():\n all_results.append(res)\n json_str = json.dumps(all_results, cls=TestResultEncoder)\n json_file = open('test_results.json', 'w', encoding='utf8')\n json_file.write(json_str)\n json_file.close()",
"def jsonreturning(fn):\n fn, props = _decorate_once(fn)\n import jsonsupport\n response_filters = props.setdefault('response_filters', [])\n response_filters.append(jsonsupport.convert_to_json)\n props['return_type'] = 'JSON'\n return fn",
"def write_as_json(results, output_file_json: TextIO):\n with output_file_json:\n json.dump(results, output_file_json, sort_keys=True)",
"def save(self):\n json_dict = {}\n for key_id in self.__objects.keys():\n json_dict[key_id] = self.__objects[key_id].to_dict()\n with open(self.__file_path, \"w\") as f:\n f.write(json.dumps(json_dict))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Send a syslog to the server. Make sure the port is open though
|
def send_syslog(string):
global SYSLOGSOCK
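    # Lazily create and connect the TCP socket on first use, then reuse it for later messages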
if not SYSLOGSOCK:
print("Creating socket to", HOST, PORT)
SYSLOGSOCK = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SYSLOGSOCK.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
SYSLOGSOCK.connect((HOST, PORT))
string = string.rstrip() + "\n"
SYSLOGSOCK.sendall(string.encode())
|
[
"def test3_output_syslog_enable(self):\n cmd = 'python3 -c \"from dnstap_receiver.receiver import start_receiver; start_receiver()\" -c ./tests/dnstap_syslog.conf'\n o = execute_dnstap(cmd)\n \n self.assertRegex(o, b\"Output handler: syslog\")",
"def handle_syslog_request(self):\n output_dict = {\"status\": \"FAILED\", \"message\": \"None\"}\n try:\n log = SendSyslog(host=self._module.paramgram[\"syslog_host\"],\n port=self._module.paramgram[\"network_port\"],\n protocol=self._module.paramgram[\"network_protocol\"],\n level=self._module.paramgram[\"syslog_level\"],\n facility=self._module.paramgram[\"syslog_facility\"],\n ssl_context=self.create_ssl_context(),\n )\n output_dict = log.send(header=self._module.paramgram[\"syslog_header\"],\n message=self._module.paramgram[\"syslog_message\"])\n except BaseException as err:\n raise FSMBaseException(msg=\"handle_syslog_request() couldn't send the syslog. Error: \" + str(err))\n return output_dict",
"def _syslog(level, message):\n message = str(message)\n print message\n syslog.syslog(level, LOG_PREFIX + message)",
"def __logSyslog(self, message, sev=syslog.LOG_NOTICE):\n \n # Log it.\n syslog.syslog(sev, message)",
"def handle_test_syslog(self, msg):\n logger.debug(\"sending test syslog\")\n txt = \"%s test syslog\" % msg.fabric\n msg.wf.send_notification(\"any_syslog\", txt, txt)",
"def send_to_syslog(events, syslog):\r\n for cnt, event in enumerate(events, start=1):\r\n syslog.send(json.dumps(event))\r\n logging.debug('Event %s sent to syslog: %s.', cnt, json.dumps(event))\r\n logging.debug('Total Events: %s ', cnt)",
"def init_syslog(level, process_ident, address='/dev/log', facility=LOG_DAEMON):\n logging.root.setLevel(level)\n if len(logging.root.handlers) == 0:\n fmt = '%s %%(levelname)s: %%(message)s' % process_ident\n hdlr = logging.handlers.SysLogHandler(address, facility)\n hdlr.setFormatter(logging.Formatter(fmt))\n logging.root.addHandler(hdlr)",
"def sendSyslog(switchlist, text):\n\n cvplogger = logging.getLogger('CvpLogger')\n cvplogger.setLevel(logging.WARNING)\n termlogger = logging.StreamHandler(sys.stdout)\n logwriter = logging.handlers.SysLogHandler(address= SYSLOGSERVER) #, 514))\n cvplogger.addHandler(logwriter)\n cvplogger.addHandler(termlogger)\n for switch in switchlist:\n cvplogger.critical('%s %s' % (text, switch))\n logwriter.close()\n cvplogger.removeHandler(logwriter)\n termlogger.close()\n cvplogger.removeHandler(termlogger)",
"def cfg_syslog_server(self, checked = False, ip_addr = \"\", log_level = \"\"):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_SYSTEM)\n\n if checked:\n if log_level == self.LOG_CRITICAL_WARNING:\n self.s.click_and_wait(self.info['loc_cfg_system_syslog_medium_radio'])\n\n elif log_level == self.LOG_CRITICAL_ONLY:\n self.s.click_and_wait(self.info['loc_cfg_system_syslog_low_radio'])\n\n else:\n self.s.click_and_wait(self.info['loc_cfg_system_syslog_high_radio'])\n\n if not self.s.is_checked(self.info['loc_cfg_system_syslog_enable_checkbox']):\n self.s.click_and_wait(self.info['loc_cfg_system_syslog_enable_checkbox'])\n\n self.s.type_text(self.info['loc_cfg_system_syslog_server_textbox'], ip_addr)\n\n else:\n if self.s.is_checked(self.info['loc_cfg_system_syslog_enable_checkbox']):\n self.s.click_and_wait(self.info['loc_cfg_system_syslog_enable_checkbox'])\n\n if not self.s.is_checked(self.info['loc_cfg_system_syslog_high_radio']):\n self.s.click_and_wait(self.info['loc_cfg_system_syslog_high_radio'])\n\n self.s.click_and_wait(self.info['loc_cfg_system_syslog_apply_button'], 3)\n if self.s.is_alert_present(5):\n msg = self.s.get_alert()\n raise Exception(msg)",
"def configure_syslog(run_command, port, in_syslog_cfg, rsyslog_cfg, syslog_ng_cfg):\n if not is_rsyslog_installed() and not is_syslog_ng_installed():\n return 0, 'configure_syslog(): Nothing to do: Neither rsyslog nor syslog-ng is installed on the system'\n\n # 1. Unconfigure existing syslog instance (if any) to avoid duplicates\n # Continue even if this step fails (not critical)\n cmd_exit_code, cmd_output = unconfigure_syslog(run_command)\n extra_msg = ''\n if cmd_exit_code != 0:\n extra_msg = 'configure_syslog(): configure_syslog.sh unconfigure failed (still proceeding): ' + cmd_output\n\n # 2. Configure new syslog instance with port number.\n # Ordering is very tricky. This must be done before modifying /etc/syslog-ng/syslog-ng.conf\n # or /etc/rsyslog.d/95-omsagent.conf below!\n cmd_exit_code, cmd_output = run_omsagent_config_syslog_sh(run_command, 'configure', port)\n if cmd_exit_code != 0:\n return 2, 'configure_syslog(): configure_syslog.sh configure failed: ' + cmd_output\n\n # 2.5. Replace '%SYSLOG_PORT%' in all passed syslog configs with the obtained port number\n in_syslog_cfg = in_syslog_cfg.replace(syslog_port_pattern_marker, str(port))\n rsyslog_cfg = rsyslog_cfg.replace(syslog_port_pattern_marker, str(port))\n syslog_ng_cfg = syslog_ng_cfg.replace(syslog_port_pattern_marker, str(port))\n\n # 3. Configure fluentd in_syslog plugin (write the fluentd plugin config file)\n try:\n with open(fluentd_syslog_src_cfg_path, 'w') as f:\n f.write(in_syslog_cfg)\n except Exception as e:\n return 3, 'configure_syslog(): Writing to omsagent.d/syslog.conf failed: {0}'.format(e)\n\n # 4. Update (add facilities/levels) rsyslog or syslog-ng config\n try:\n if is_syslog_ng_installed():\n append_string_to_file(syslog_ng_cfg, syslog_ng_conf_path)\n elif is_new_rsyslog_installed():\n append_string_to_file(rsyslog_cfg, rsyslog_d_omsagent_conf_path)\n else: # old rsyslog, so append to rsyslog_top_conf_path\n append_string_to_file(rsyslog_cfg, rsyslog_top_conf_path)\n except Exception as e:\n return 4, 'configure_syslog(): Adding facilities/levels to rsyslog/syslog-ng conf failed: {0}'.format(e)\n\n # 5. Restart syslog\n cmd_exit_code, cmd_output = restart_syslog(run_command)\n if cmd_exit_code != 0:\n return 5, 'configure_syslog(): Failed at restarting syslog (rsyslog or syslog-ng). ' \\\n 'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)\n\n # All succeeded\n return 0, 'configure_syslog(): Succeeded. Extra message: {0}'.format(extra_msg if extra_msg else 'None')",
"def __log_file(self):\n while True:\n line = self.fd.readline()\n if not line: break\n syslog.syslog(self.p, line)",
"def setup_syslog(self):\n if 'related_id' in self.config:\n for val in self.config['related_id']:\n if self.config['related_conf'][val]['instance_type'] == \"syslog_settings\":\n syslog_set_id = val\n # TODO check on first 1.1 build syslog parameters value\n syslog_ip = self.config['related_conf'][syslog_set_id]['ip']\n syslog_proto = self.config['related_conf'][syslog_set_id]['proto']\n syslog_port = self.config['related_conf'][syslog_set_id]['port']\n syslog_localport = self.config['related_conf'][syslog_set_id]['localport']\n syslog_transport = self.config['related_conf'][syslog_set_id]['transport']\n syslog_facility = self.config['related_conf'][syslog_set_id]['facility']\n syslog_severity = self.config['related_conf'][syslog_set_id]['severity']\n try:\n self.ui.create_syslog(syslog_proto, syslog_ip, syslog_port, syslog_localport, syslog_transport, syslog_facility, syslog_severity)\n self.class_logger.debug(\"Syslog configuration finished. Syslog server: %s, proto: %s\" % (syslog_ip, syslog_proto))\n return\n except Exception as err:\n self.class_logger.debug(\"Syslog configuration skipped. Some error occurs %s\" % (err, ))\n self.class_logger.debug(\"Syslog configuration skipped. Syslog settings not found.\")",
"def do_syslog_tail(dut):",
"def restart_syslog(run_command):\n return run_omsagent_config_syslog_sh(run_command, 'restart') # port param is dummy here.",
"def packagelog(op, status, names):\n if not names:\n return\n\n # write it to the syslog if that's what the user wants\n if arizonaconfig.get_option(\"log\"):\n try:\n for name in names:\n arizonareport.send_syslog(arizonareport.INFO, \\\r\n \"[stork] \" + op + \" <\" + status + \"> \" + name)\r\n except TypeError:\r\n arizonareport.send_error(0, \"failed to write syslog\")\r\n\r\n # write it to the log file if that's what the user wants\r\n fn = arizonaconfig.get_option(\"packagelogfile\")\n if fn:\n try:\n file = open(fn, \"a\")\n if file:\n for name in names:\n file.write(op + \" <\" + status + \"> \" + name + \"\\n\")\n file.close()\n except IOError:\n arizonareport.send_error(0, \"failed to write package log: \" + str(fn))",
"def _print_logs():\n import zmq\n with zmq.Context() as context:\n with context.socket(zmq.REP) as socket:\n socket.bind(\"tcp://{}:{}\".format(ip, port))\n logger.info(\"started log server on {}:{}\".format(ip, port))\n\n while True:\n message = socket.recv()\n print(message.decode(\"utf-8\"))\n socket.send(b\"received\")",
"def push(logs,config):\n \n udpConfig=dict(config)\n \n udp_host=str(udpConfig[\"hostname\"])\n udp_port=int(udpConfig[\"port\"])\n \n sock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n \n for log in logs:\n try:\n sock.sendto(str(log),(udp_host,udp_port))\n except Exception as err:\n print('UDP connection failed. {0}'.format(err))\n Graylog2Logger.logToGraylog2('UDP connection failed. {0}'.format(err))",
"def do_debug(sock):\n sock.sendall('DEBUG |')\n msg = sock.recv(64)\n if msg != 'success':\n print msg",
"def host_scraper_log():\n return send_from_directory('/home/ubuntu/mhm-scraper', 'log.txt')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Send a bunch of data every few seconds to simulate actual callback stuff
|
def send_timed(count, interval=30):
print("Sending data", count, "times at an interval of", interval, "seconds")
for i in range(count):
# 50% chance to send 2-5 creds
if random.random() < 0.50:
for j in range(random.randint(2, 5)):
cred = gen_cred()
print("Sending credential", cred)
send_syslog(cred)
        # Send 10-20 beacons every few seconds
        for j in range(random.randint(10, 20)):
callback = gen_callback()
print("Sending callback", callback)
send_syslog(callback)
            time.sleep(random.randint(0, 3))  # Sleep for 0-3 seconds and then send the next beacon
st = max(1, interval + random.randint(-15, 15))
print("Sleeping for", st, "seconds. (Iteration {})".format(i))
        time.sleep(st)  # Sleep for interval +/- 15 seconds (clamped to at least 1)
|
[
"def main():\n initData()\n\n # Loop to simulate data connections\n # Current set up\n # Once an hour send hsk\n # Every 10 min send hsk\n # Every 5 min send spec and nrbd\n # This can be changed for different cycles\n while 1:\n connection([\"time\",\"spec\",\"nrbd\",\"hsk\"])\n for j in range(0,10):\n sleep(5*60)\n connection([\"spec\",\"nrbd\"])\n sleep(5*60)\n connection([\"spec\",\"nrbd\",\"hsk\"])",
"def test_call_timeout(self):\n def generator_func(count, soft_delay, hard_delay):\n for _ in xrange(count):\n yield soft_delay\n sleep(hard_delay)\n\n # add 'noise', i.e. something else the callback should be handling at the same time\n self._dispersy.callback.register(generator_func, (50, 0.1, 0.5))\n\n # test on the same thread\n begin = time()\n result = self._dispersy.callback.call(generator_func, (1, 2.0, 0.0), timeout=1.0, default=\"timeout\")\n end = time()\n self.assertGreaterEqual(end - begin, 1.0)\n self.assertEqual(result, \"timeout\")\n\n # test on a separate thread\n def separate_thread():\n begin = time()\n result = self._dispersy.callback.call(generator_func, (1, 2.0, 0.0), timeout=1.0, default=\"timeout\")\n end = time()\n self.assertGreaterEqual(end - begin, 1.0)\n self.assertEqual(result, \"timeout\")\n\n thread = Thread(target=separate_thread)\n thread.start()\n thread.join(2.0)\n self.assertFalse(thread.is_alive())",
"def sleep_and_run(self):\n sleep_start = time.time()\n time_left = self.time_left\n if time_left > 0:\n time.sleep(self.time_left)\n self.reset()\n next(self.callback)",
"def clock(rpc):\n\n while True:\n yield from rpc.notify('clock', str(datetime.datetime.now()))\n yield from asyncio.sleep(1)",
"def sleep_asynchronously():\n time.sleep(20)",
"def run_continously(self):\n while self.totalTimes > 0:\n self.put_record()\n time.sleep(self.sleepInterval)\n self.totalTimes = self.totalTimes - 1",
"def test_message_sendlater():\n def helper_func(token):\n \"\"\" Check that there are no messages in channel \"\"\"\n url = (f\"{get_url()}/channel/messages?token={token}\"\n f\"&channel_id={channel['channel_id']}&start=0\")\n response = urllib.request.urlopen(url)\n payload = json.load(response)\n\n assert payload['messages'] == []\n\n user = server_create_user(\"email@email.com\", \"password\", \"Prince\", \"Ali\")\n channel = server_create_channel(user['token'], \"test_channel\", True)\n\n # Run test check empty a second after message_sendlater has been\n # called (but hasn't finished executing)\n new_thread = threading.Timer(1.5, helper_func, args=(user['token']))\n new_thread.start()\n\n # Send a message later\n time_sent = datetime.now() + timedelta(seconds=2)\n time_sent = int(time_sent.timestamp())\n data = json.dumps({'token': user['token'],\n 'channel_id': channel['channel_id'],\n 'message': \"omegalul\",\n 'time_sent': time_sent}).encode('utf-8')\n req = urllib.request.Request(f\"{get_url()}/message/sendlater\",\n data=data,\n headers={'Content-Type': 'application/json'},\n method='POST')\n response = urllib.request.urlopen(req)\n json.load(response)\n\n url = (f\"{get_url()}/channel/messages?token={user['token']}\"\n f\"&channel_id={channel['channel_id']}&start=0\")\n response = urllib.request.urlopen(url)\n payload = json.load(response)\n\n assert len(payload['messages']) == 1\n assert payload['messages'][0]['message'] == \"omegalul\"\n assert payload['messages'][0]['time_created'] == time_sent",
"def do_something_every_hour():\n sleep(5)",
"def running(self):\n self.sendData()",
"def callback_serial(self):\r\n while self.queue_serial.qsize():\r\n try:\r\n queuer = self.queue_serial.get(0)\r\n queuer.daemon=True\r\n queuer.start()\r\n except Empty:\r\n pass\r\n self.callback_obj1=self.after(1000, self.callback_serial)",
"def _callback_cmd_sleep(self, cmd):\n\n self._increment_time(cmd.ms)",
"def alice_send():\n count = 1\n while True:\n buffer.append(f'Alice send message for the {count} time.')\n count += 1\n sleep(5)",
"def main():\n while True:\n reply()\n time.sleep(60)",
"def _updater(interval, set_func, data_func):\n interval /= 1000\n\n def f():\n while True:\n set_func(data_func())\n time.sleep(interval)\n\n threading.Thread(target=f, daemon=True).start()",
"def broadcast_loop():\n\n while True:\n broadcast()\n\n time.sleep(0.5)",
"def timedCall(self, ms, callback, *args, **kwargs):\n f = lambda: callback(*args, **kwargs)\n f2 = lambda: QTimer.singleShot(ms, f)\n self._posted.emit(f2)",
"def _rt_sample_sendloop(self):\r\n # start delayed in order to have a fully initialized device when waveforms start\r\n # (otherwise timing issues might happen)\r\n time.sleep(0.1)\r\n timer = intervaltimer.IntervalTimer(periodInSeconds=self.collectRtSamplesPeriod)\r\n while self._runRtSampleThread:\r\n behindScheduleSeconds = timer.waitForNextIntervalBegin()\r\n try:\r\n self._mdib.update_all_rt_samples() # update from waveform generators\r\n self._logWaveformTiming(behindScheduleSeconds)\r\n except Exception:\r\n self._logger.warn(' could not update real time samples: {}', traceback.format_exc())",
"def test_send_callback(self):\n test_is_done = threading.Event()\n data = [\n {'topic': 'topic1', 'data': 'data1', 'options': {}},\n {'topic': 'topic2', 'data': 'data2', 'options': None}\n ]\n\n def started(client):\n \"\"\"started listener\"\"\"\n def send_callback(err, topic, d, options):\n \"\"\"send callback\"\"\"\n opts = data.pop()\n assert err is None\n assert topic == opts['topic']\n assert d == opts['data']\n assert options == opts['options']\n if len(data) == 0:\n client.stop()\n test_is_done.set()\n\n try:\n for test in reversed(data):\n client.send(test['topic'],\n test['data'],\n test['options'],\n send_callback)\n except Exception as exc:\n pytest.fail('Unexpected Exception ' + str(exc))\n client = mqlight.Client('amqp://host',\n client_id='test_send_callback',\n on_started=started)\n test_is_done.wait(self.TEST_TIMEOUT)\n assert test_is_done.is_set()",
"def run(self):\n clock = SoftRealTimeClock(period=self.interval)\n while threading.main_thread().is_alive():\n self._callback()\n clock.sleep()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Overall Allocation Cost Metric
|
def overall_cost(system, control_input, environment_input):
costs = _calc_resource_allocation_cost(system, control_input, environment_input)
return sum(costs) if len(costs) > 0 else 0.0
|
[
"def cost_perf_index(self):\n \n ev = self.apc * self.budget\n \n return ev / self.ac",
"def administration_overhead_cost(self, *args, **kwargs):\n result = 0\n for overhead in self.overhead_list:\n result += overhead.administration_overhead_rate\n return round((self.material_cost() + self.manufacturing_cost) * result / 100, 2)",
"def total_cost(self):\n return sum(self.edges[e].V for e in self.edges)",
"def _calc_resource_allocation_cost(system, control_input, environment_input):\n costs = []\n for app in system.apps:\n # TODO: calculate only for internal nodes?\n for node in system.nodes:\n if not control_input.app_placement[app.id][node.id]:\n continue\n nb_instances = 1\n if isinstance(node, GlobalNode):\n nb_instances = environment_input.get_nb_instances(app.id, node.id)\n nb_instances = int(max(1, nb_instances))\n\n cost = 0.0\n for resource in system.resources:\n alloc_resource = control_input.allocated_resource[app.id][node.id][resource.name]\n # TODO: is this multiplication by number of instances really necessary?\n cost += nb_instances * node.cost[resource.name](alloc_resource)\n if system.sampling_time > 0.0:\n cost *= system.sampling_time\n costs.append(cost)\n return costs",
"def special_total_cost(self):\n return round(self.packaging_cost + self.freight_cost + self.duty_cost + self.tooling_cost(), 2)",
"def allocated(self):\n alloc = 0\n for expense in self.expenses:\n alloc += expense.budget\n return alloc",
"def get_total_cost(self):\n dvs = [norm(dv) for dv in self._dvs]\n return sum(dvs, 0 * u.km / u.s)",
"def current_capacity(self) -> float:\n now = timezone.now().date()\n return sum(a.percentage for a in RSEAllocation.objects.filter(rse=self, start__lte=now, end__gt=now, project__status='F'))",
"def capacity(self):\r\n if self.learning_rule == 'Hebbian':\r\n self._capacity = self.nbr_attractors / (2 * log(self.nbr_attractors))\r\n\r\n elif self.learning_rule == 'Storkey':\r\n self._capacity = self.nbr_attractors / (sqrt(2 * log(self.nbr_attractors)))\r\n\r\n print('Network\\'s capacity is {}'.format(round(self._capacity, 2)))",
"def _cost_function(self) -> None:\n workloads = {wl.app: wl.values[0] for wl in self.workloads}\n\n self.pulp_problem += (\n lpSum(\n [\n self.cooked.map_res[_a, _ic]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_res\n ]\n + [\n self.cooked.map_dem[_a, _ic, _l]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_dem\n for _l in self.load_hist.keys()\n ]\n ),\n \"Objective: maximize fulfilled workload fraction\",\n )",
"def compute_aggregated_cost(self, costs, constraints=None):\n pass",
"def _construct_adv_cost(self):\n match_cost = self.GN.compute_log_prob(Xd=self.match_target)\n adv_cost = -T.sum(match_cost) / self.obs_count\n return adv_cost",
"def total_cost(self, *args, **kwargs):\n return round(self.material_cost() + self.manufacturing_cost + self.overhead_cost() + self.special_cost() + self.profit(), 2)",
"def operating_cost(self):\n return self._cost_data",
"def calculate_capacity_for(m_name, m_pods, m_cpu, m_mem, node_map):\n # print(\n # f\"Checking capacity of metric: {m_name}\\n\"\n # f\" CPU: {m_cpu}\\n\"\n # f\" memory: {m_mem}\\n\"\n # f\" pods: {m_pods}\"\n # )\n\n metric_capacity = 0\n for node in node_map.values():\n # print(f\"Examining available capacity in node: {node['name']}\")\n pods = node[\"available\"][\"pods\"]\n cpu = node[\"available\"][\"cpu\"]\n mem = node[\"available\"][\"memory\"]\n\n if pods < 1:\n continue\n\n node_capacity = 0\n\n # print(f\"Comparing required CPU: {m_cpu} to node available CPU: {cpu}\")\n if m_cpu is not None and m_cpu > 0:\n if m_cpu >= cpu:\n continue\n\n m_count = floor(cpu / m_cpu)\n # print(\n # f\"Node has {m_count} capacity in terms of CPU (req: {m_cpu}, avail: {cpu})\"\n # )\n node_capacity = (\n m_count if node_capacity < 1 else min(m_count, node_capacity)\n )\n\n # print(f\"Comparing required Memory: {m_mem} to node available Memory: {mem}\")\n if m_mem is not None and m_mem > 0:\n if m_mem >= mem:\n continue\n\n m_count = floor(mem / m_mem)\n # print(\n # f\"Node has {m_count} capacity in terms of Memory (req: {m_mem}, avail: {mem})\"\n # )\n node_capacity = (\n m_count if node_capacity < 1 else min(m_count, node_capacity)\n )\n\n node_capacity = 1 if node_capacity < 1 else min(node_capacity, pods)\n # print(f\"Node: {node['name']} has CPU/memory capacity: {node_capacity}\")\n\n metric_capacity += node_capacity\n # print(\n # f\"After adding capacity {node_capacity} on node: {node['name']}, \" \\\n # f\"capacity of {m_name} is {metric_capacity}\\n\"\n # )\n\n # print(f\"Comparing required pods: {m_pods} to total available pods: {metric_capacity}\")\n if m_pods is not None and metric_capacity > m_pods:\n metric_capacity = floor(metric_capacity / m_pods)\n\n # print(\n # f\"After factoring out pod-count / cluster capacity {m_pods}, capacity of {m_name} is {metric_capacity}\\n\\n\"\n # )\n\n return metric_capacity",
"def compute_average_macs_cost(self):\n\n for m in self.modules():\n m.accumulate_macs = accumulate_macs.__get__(m)\n\n macs_sum = self.accumulate_macs()\n\n for m in self.modules():\n if hasattr(m, 'accumulate_macs'):\n del m.accumulate_macs\n\n return macs_sum / self.__batch_counter__",
"def material_subtotal_cost(self):\n return round(self.bom_cost() + self.loss_cost() + self.material_overhead_cost() + self.indirect_cost, 2)",
"def development_overhead_cost(self, *args, **kwargs):\n result = 0\n for overhead in self.overhead_list:\n result += overhead.development_overhead_rate\n return round((self.material_cost() + self.manufacturing_cost) * result / 100, 2)",
"def _cost_function(self) -> None:\n\n period_length = sum(self.load_hist.values())\n\n self.pulp_problem += (\n lpSum(\n [\n self.cooked.map_res[_a, _ic]\n * self.cooked.instance_prices[_ic]\n * period_length\n for _a in self.system.apps\n for _ic in self.cooked.instances_res\n ]\n + [\n self.cooked.map_dem[_a, _ic, _l]\n * self.cooked.instance_prices[_ic]\n * self.load_hist[_l]\n for _a in self.system.apps\n for _ic in self.cooked.instances_dem\n for _l in self.load_hist.keys()\n ]\n ),\n \"Objective: minimize cost\",\n )",
"def calculate_total_cost(self) -> int:\n fixed = len(json.dumps({\n \"pre_block\": '',\n \"arguments\": {\n \"key\": '',\n \"value\": ''\n }\n })) + 64\n s = 0\n for k, v in self.__cache_dict.items():\n if not v[1]:\n s += fixed + len(k) + len(v[0])\n\n for k in self.__delete_set:\n s += fixed + len(k)\n return s"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Maximum Allocation Cost Metric
|
def max_cost(system, control_input, environment_input):
costs = _calc_resource_allocation_cost(system, control_input, environment_input)
return max(costs) if len(costs) > 0 else 0.0
|
[
"def _GetMaximalMetrics(self):\n metrics = list(self._hpc.free_metrics)\n metrics += list(self._hpc.non_free_metrics)[0:self._hpc.max_counters]\n return metrics",
"def max_capacity(self) -> jsii.Number:\n return self._values.get(\"max_capacity\")",
"def compute_optimum(self):\n assert self.sample_dataframe is not None and len(\n self.sample_dataframe) > 0\n\n slo_type = self.sample_dataframe['slo_type'].iloc[0]\n if slo_type == 'latency':\n perf_arr = 1. / self.sample_dataframe['qos_value']\n else:\n perf_arr = self.sample_dataframe['qos_value']\n\n perf_over_cost = perf_arr / self.sample_dataframe['cost']\n return np.max(perf_over_cost)",
"def MaxOverload(load):\n per_device_load = tf.reduce_sum(tf.reshape(load, [tf.shape(load)[0], -1]), 1)\n return (tf.reduce_max(per_device_load) /\n (tf.reduce_mean(per_device_load) + 1e-10))",
"def capacity(self):\r\n if self.learning_rule == 'Hebbian':\r\n self._capacity = self.nbr_attractors / (2 * log(self.nbr_attractors))\r\n\r\n elif self.learning_rule == 'Storkey':\r\n self._capacity = self.nbr_attractors / (sqrt(2 * log(self.nbr_attractors)))\r\n\r\n print('Network\\'s capacity is {}'.format(round(self._capacity, 2)))",
"def _cost_function(self) -> None:\n workloads = {wl.app: wl.values[0] for wl in self.workloads}\n\n self.pulp_problem += (\n lpSum(\n [\n self.cooked.map_res[_a, _ic]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_res\n ]\n + [\n self.cooked.map_dem[_a, _ic, _l]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_dem\n for _l in self.load_hist.keys()\n ]\n ),\n \"Objective: maximize fulfilled workload fraction\",\n )",
"def max_memory_allocated(self) -> int:\n return max(t.allocated for t in self.memory_traces)",
"def MaximumThreshold(self) -> int:",
"def max_cargo_mass_utilization(self):\n\n if not self.trip_data:\n return np.NaN\n\n return np.max(self.cargo_mass_utilizations)",
"def min_memory_analytic(self, n_train, n_atoms):\n return (8 * ((n_train * 3 * n_atoms) ** 2)) * 1e-6",
"def max_memory(self) -> int:\n raise NotImplementedError",
"def getMaxPool() -> uint256:\n return self.maxPool",
"def _construct_adv_cost(self):\n match_cost = self.GN.compute_log_prob(Xd=self.match_target)\n adv_cost = -T.sum(match_cost) / self.obs_count\n return adv_cost",
"def _calc_resource_allocation_cost(system, control_input, environment_input):\n costs = []\n for app in system.apps:\n # TODO: calculate only for internal nodes?\n for node in system.nodes:\n if not control_input.app_placement[app.id][node.id]:\n continue\n nb_instances = 1\n if isinstance(node, GlobalNode):\n nb_instances = environment_input.get_nb_instances(app.id, node.id)\n nb_instances = int(max(1, nb_instances))\n\n cost = 0.0\n for resource in system.resources:\n alloc_resource = control_input.allocated_resource[app.id][node.id][resource.name]\n # TODO: is this multiplication by number of instances really necessary?\n cost += nb_instances * node.cost[resource.name](alloc_resource)\n if system.sampling_time > 0.0:\n cost *= system.sampling_time\n costs.append(cost)\n return costs",
"def getCapacityFactor(self): \n return self.capFact",
"def objective_function(constraint):\n\n def maximum_stress(solution):\n unflattened_solution = BridgeFactory.preprocess_solution(\n constraint, solution\n )\n load_map = Bridge._create_load_map(unflattened_solution)\n\n current_max_overstress = 0.0\n for row in range(Bridge.HEIGHT):\n for column in range(Bridge.WIDTH):\n cell_overstress = (\n load_map[row][column] - unflattened_solution[row][column]\n )\n if cell_overstress > current_max_overstress:\n current_max_overstress = cell_overstress\n return current_max_overstress\n\n return maximum_stress",
"def getMaxUses(self):\n return self.handle.maxUses",
"def max_utility(self):\n return 100",
"def cost_multiplier(self):\n return 1.0",
"def calculate_capacity_for(m_name, m_pods, m_cpu, m_mem, node_map):\n # print(\n # f\"Checking capacity of metric: {m_name}\\n\"\n # f\" CPU: {m_cpu}\\n\"\n # f\" memory: {m_mem}\\n\"\n # f\" pods: {m_pods}\"\n # )\n\n metric_capacity = 0\n for node in node_map.values():\n # print(f\"Examining available capacity in node: {node['name']}\")\n pods = node[\"available\"][\"pods\"]\n cpu = node[\"available\"][\"cpu\"]\n mem = node[\"available\"][\"memory\"]\n\n if pods < 1:\n continue\n\n node_capacity = 0\n\n # print(f\"Comparing required CPU: {m_cpu} to node available CPU: {cpu}\")\n if m_cpu is not None and m_cpu > 0:\n if m_cpu >= cpu:\n continue\n\n m_count = floor(cpu / m_cpu)\n # print(\n # f\"Node has {m_count} capacity in terms of CPU (req: {m_cpu}, avail: {cpu})\"\n # )\n node_capacity = (\n m_count if node_capacity < 1 else min(m_count, node_capacity)\n )\n\n # print(f\"Comparing required Memory: {m_mem} to node available Memory: {mem}\")\n if m_mem is not None and m_mem > 0:\n if m_mem >= mem:\n continue\n\n m_count = floor(mem / m_mem)\n # print(\n # f\"Node has {m_count} capacity in terms of Memory (req: {m_mem}, avail: {mem})\"\n # )\n node_capacity = (\n m_count if node_capacity < 1 else min(m_count, node_capacity)\n )\n\n node_capacity = 1 if node_capacity < 1 else min(node_capacity, pods)\n # print(f\"Node: {node['name']} has CPU/memory capacity: {node_capacity}\")\n\n metric_capacity += node_capacity\n # print(\n # f\"After adding capacity {node_capacity} on node: {node['name']}, \" \\\n # f\"capacity of {m_name} is {metric_capacity}\\n\"\n # )\n\n # print(f\"Comparing required pods: {m_pods} to total available pods: {metric_capacity}\")\n if m_pods is not None and metric_capacity > m_pods:\n metric_capacity = floor(metric_capacity / m_pods)\n\n # print(\n # f\"After factoring out pod-count / cluster capacity {m_pods}, capacity of {m_name} is {metric_capacity}\\n\\n\"\n # )\n\n return metric_capacity"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Average Allocation Cost Metric
|
def avg_cost(system, control_input, environment_input):
costs = _calc_resource_allocation_cost(system, control_input, environment_input)
return mean(costs) if len(costs) > 0 else 0.0
|
[
"def compute_average_macs_cost(self):\n\n for m in self.modules():\n m.accumulate_macs = accumulate_macs.__get__(m)\n\n macs_sum = self.accumulate_macs()\n\n for m in self.modules():\n if hasattr(m, 'accumulate_macs'):\n del m.accumulate_macs\n\n return macs_sum / self.__batch_counter__",
"def getAvgCost(computeCost, node):\n # Retrieve allt he procs that can run this task\n allCost = computeCost[node]\n\n allRunCost = [i for i in allCost if i != INF]\n\n # return the average\n return sum(allRunCost)/float(len(allRunCost))",
"def cost_perf_index(self):\n \n ev = self.apc * self.budget\n \n return ev / self.ac",
"def avg_Ao(self):\n ...",
"def get_average_cost(self):\n open_lots = self.total_open_lots()\n if open_lots == 0 or not open_lots:\n return 0\n\n return abs(self.total_market_value()/self.total_open_lots())",
"def average_traffic(self):\n \n res=\"0\"\n if self.hits==0:\n return res\n else:\n res=\"%d kb/hit\" % (self.size/self.hits)\n return res",
"def get_total_cost(self):\n dvs = [norm(dv) for dv in self._dvs]\n return sum(dvs, 0 * u.km / u.s)",
"def compute_aggregated_cost(self, costs, constraints=None):\n pass",
"def _construct_adv_cost(self):\n match_cost = self.GN.compute_log_prob(Xd=self.match_target)\n adv_cost = -T.sum(match_cost) / self.obs_count\n return adv_cost",
"def total_cost(self):\n return sum(self.edges[e].V for e in self.edges)",
"def cost(self,output,y):\r\n\r\n return np.mean(np.square(output - y))",
"def cost(start, end, average=average):\n if end < start:\n raise ValueError(\"end cannot be greater than start\")\n return pow((cumVerseCounts[end]-cumVerseCounts[start-1])-average, 2)",
"def cost_variance(self):\n \n ev = self.apc * self.budget\n \n return ev - self.ac",
"def compute_averages(self):\n self.energy_average = self.cumulative_energy / self.N\n self.energy_squared_average = self.cumulative_squared_energy / self.N\n self.wave_function_derivative_average = self.cumulative_wave_function_derivative / self.N\n self.wave_function_energy_average = self.cumulative_wave_function_energy / self.N",
"def average_linkage(c1, c2):",
"def computeClassAverage(self):\n sum=0 #running\n for student in self.__classlist:\n sum+=student.percentageGen()\n \n return sum/len(self.__classlist)",
"def administration_overhead_cost(self, *args, **kwargs):\n result = 0\n for overhead in self.overhead_list:\n result += overhead.administration_overhead_rate\n return round((self.material_cost() + self.manufacturing_cost) * result / 100, 2)",
"def average_customers_in_system(self):\n raise NotImplementedError()",
"def calculate_cost(result):\n if 'input_list' in result:\n grades = [r['grade_decimal'] for r in result['input_list']]\n result['grade_decimal'] = consolidate_grades(grades)\n return 1 - result['grade_decimal']",
"def get_average(self):\n # compute the mean\n self.average_fit = statistics.mean([self.fitness_dict[key] for key in self.fitness_dict])\n self.average_age = statistics.mean([self.age_dict[key] for key in self.age_dict])\n\n # Add average fitness at each time step to the collector\n self.average_fit_list.append(self.average_fit)\n self.average_age_list.append(self.average_age)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate resource allocation cost for every application instance placed on nodes
|
def _calc_resource_allocation_cost(system, control_input, environment_input):
costs = []
for app in system.apps:
# TODO: calculate only for internal nodes?
for node in system.nodes:
if not control_input.app_placement[app.id][node.id]:
continue
nb_instances = 1
if isinstance(node, GlobalNode):
nb_instances = environment_input.get_nb_instances(app.id, node.id)
nb_instances = int(max(1, nb_instances))
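            # Sum the allocation cost over every resource type allocated on this node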
cost = 0.0
for resource in system.resources:
alloc_resource = control_input.allocated_resource[app.id][node.id][resource.name]
# TODO: is this multiplication by number of instances really necessary?
cost += nb_instances * node.cost[resource.name](alloc_resource)
if system.sampling_time > 0.0:
cost *= system.sampling_time
costs.append(cost)
return costs
|
[
"def calculate_capacity_for(m_name, m_pods, m_cpu, m_mem, node_map):\n # print(\n # f\"Checking capacity of metric: {m_name}\\n\"\n # f\" CPU: {m_cpu}\\n\"\n # f\" memory: {m_mem}\\n\"\n # f\" pods: {m_pods}\"\n # )\n\n metric_capacity = 0\n for node in node_map.values():\n # print(f\"Examining available capacity in node: {node['name']}\")\n pods = node[\"available\"][\"pods\"]\n cpu = node[\"available\"][\"cpu\"]\n mem = node[\"available\"][\"memory\"]\n\n if pods < 1:\n continue\n\n node_capacity = 0\n\n # print(f\"Comparing required CPU: {m_cpu} to node available CPU: {cpu}\")\n if m_cpu is not None and m_cpu > 0:\n if m_cpu >= cpu:\n continue\n\n m_count = floor(cpu / m_cpu)\n # print(\n # f\"Node has {m_count} capacity in terms of CPU (req: {m_cpu}, avail: {cpu})\"\n # )\n node_capacity = (\n m_count if node_capacity < 1 else min(m_count, node_capacity)\n )\n\n # print(f\"Comparing required Memory: {m_mem} to node available Memory: {mem}\")\n if m_mem is not None and m_mem > 0:\n if m_mem >= mem:\n continue\n\n m_count = floor(mem / m_mem)\n # print(\n # f\"Node has {m_count} capacity in terms of Memory (req: {m_mem}, avail: {mem})\"\n # )\n node_capacity = (\n m_count if node_capacity < 1 else min(m_count, node_capacity)\n )\n\n node_capacity = 1 if node_capacity < 1 else min(node_capacity, pods)\n # print(f\"Node: {node['name']} has CPU/memory capacity: {node_capacity}\")\n\n metric_capacity += node_capacity\n # print(\n # f\"After adding capacity {node_capacity} on node: {node['name']}, \" \\\n # f\"capacity of {m_name} is {metric_capacity}\\n\"\n # )\n\n # print(f\"Comparing required pods: {m_pods} to total available pods: {metric_capacity}\")\n if m_pods is not None and metric_capacity > m_pods:\n metric_capacity = floor(metric_capacity / m_pods)\n\n # print(\n # f\"After factoring out pod-count / cluster capacity {m_pods}, capacity of {m_name} is {metric_capacity}\\n\\n\"\n # )\n\n return metric_capacity",
"def resource_availability():\n return dict(nodes_free=randrange(1, 500))",
"def enrich_node_map(node_map):\n for nodeInfo in node_map.values():\n nodeInfo[\"allCpuRequests\"] = sum(nodeInfo[\"cpuRequests\"])\n nodeInfo[\"allMemoryRequests\"] = sum(nodeInfo[\"memoryRequests\"])\n nodeInfo[\"allCpuLimits\"] = sum(nodeInfo[\"cpuLimits\"])\n nodeInfo[\"allMemoryLimits\"] = sum(nodeInfo[\"memoryLimits\"])\n # print(\n # f\"[{nodeInfo['name']}] node capacity summary:\"\n # f\"\\nCalculated {nodeInfo['allCpuRequests']} \"\n # + f\"from {len(nodeInfo['cpuRequests'])} pod CPU requests\"\n # + f\"\\n {nodeInfo['allMemoryRequests']} \"\n # + f\"from {len(nodeInfo['memoryRequests'])} pod memory requests\"\n # + f\"from {len(nodeInfo['memoryRequests'])} pod memory requests\"\n # + f\"\\nCalculated from {len(nodeInfo['pods'])} total pods\"\n # + f\"\\nNode has: {nodeInfo['cpuAllocatable']} CPU to allocate\"\n # + f\"\\n {nodeInfo['memoryAllocatable']} memory to allocate\"\n # + f\"\\n {nodeInfo['podAllocatable']} pods to allocate\"\n # )\n\n available = {\n \"cpu\": nodeInfo[\"cpuAllocatable\"] - nodeInfo[\"allCpuRequests\"],\n \"memory\": nodeInfo[\"memoryAllocatable\"] - nodeInfo[\"allMemoryRequests\"],\n \"pods\": nodeInfo[\"podAllocatable\"] - len(nodeInfo[\"pods\"]),\n }\n nodeInfo[\"available\"] = available\n\n commitments = dict()\n nodeInfo[\"commitments\"] = commitments\n\n commitments[\"cpuLimit\"] = nodeInfo[\"allCpuLimits\"] / nodeInfo[\"cpuAllocatable\"]\n commitments[\"MemoryLimit\"] = (\n nodeInfo[\"allMemoryLimits\"] / nodeInfo[\"memoryAllocatable\"]\n )\n commitments[\"cpuRequest\"] = (\n nodeInfo[\"allCpuRequests\"] / nodeInfo[\"cpuAllocatable\"]\n )\n commitments[\"MemoryRequest\"] = (\n nodeInfo[\"allMemoryRequests\"] / nodeInfo[\"memoryAllocatable\"]\n )\n\n commitments[\"pod\"] = len(nodeInfo[\"pods\"]) / nodeInfo[\"podAllocatable\"]",
"def get_node_capacity(graph, procPower, bufferSize):\n for node in graph.vertices():\n #append the node to the working dict\n validator_node[node] = []\n #calculate time to confirm transactions\n tc = int(660*2**(-procPower/5))\n #print(tc)\n #calculate number of confirmed transactions\n N = graph.vertex_degree(node)\n confTxns = int(9100*(2**(-N/5) + (bufferSize * 0.03)))\n #print(confTxns)\n #calculate overall capacity of node\n z = confTxns / tc\n #append value to node list \n validator_node[node].append(int(z))\n #print(\"Node's capacity (transactions/sec): \", validator_node)",
"def _cost_function(self) -> None:\n workloads = {wl.app: wl.values[0] for wl in self.workloads}\n\n self.pulp_problem += (\n lpSum(\n [\n self.cooked.map_res[_a, _ic]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_res\n ]\n + [\n self.cooked.map_dem[_a, _ic, _l]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_dem\n for _l in self.load_hist.keys()\n ]\n ),\n \"Objective: maximize fulfilled workload fraction\",\n )",
"def test_get_total_allocatable_mem_cpu_worker_node(self, setup_params):\n node_api_obj = setup_params[\"node_api_obj\"]\n total_allocatable_memory, total_allocatable_cpu = node_api_obj.get_total_allocatable_mem_cpu(\"worker\")\n if not total_allocatable_memory:\n assert False, \"Failed to get total allocatable memory from worker nodes in the cluster\"\n assert isinstance(total_allocatable_memory, int)\n if not total_allocatable_cpu:\n assert False, \"Failed to get total allocatable cpu from worker nodes in the cluster\"\n assert isinstance(total_allocatable_cpu, int)",
"def _add_compute_nodes(scheduler_commands, slots_per_node, number_of_nodes=1):\n initial_compute_nodes = scheduler_commands.get_compute_nodes()\n\n number_of_nodes = len(initial_compute_nodes) + number_of_nodes\n # submit a job to perform a scaling up action and have new instances\n result = scheduler_commands.submit_command(\"sleep 1\", nodes=number_of_nodes, slots=slots_per_node)\n job_id = scheduler_commands.assert_job_submitted(result.stdout)\n scheduler_commands.wait_job_completed(job_id)\n scheduler_commands.assert_job_succeeded(job_id)\n\n return [node for node in scheduler_commands.get_compute_nodes() if node not in initial_compute_nodes]",
"def get_reserved_allocation(self) -> ReservedAllocation:\n # Returns the solution as a list of numbers, each one\n # representing the required number of vms of each reserved type, stored\n # in the field \"vms_number\" of the object.\n\n # This number is valid for any workload tuple, and for every timeslot\n # in the reservation period. Also, it does not depend on the applications\n # because it is the total number of reserved instances for all apps.\n\n # The returned class also stores the list \"instance_classes\" which provides\n # the instance class associated with each index in the above table.\n\n # So, if r is the value returned, the value of r.vms_number[i]\n # (being i an integer) is the number of VMs to be allocated\n # from reserved instance class r.instance_classes[i], for every\n # timeslot and for the set of all apps.\n\n # This is all the information required for PhaseII.\n\n if self.pulp_problem.status != pulp.LpStatusOptimal:\n raise ValueError(\"Cannot get the cost when the status is not optimal\")\n\n allocation: List[float] = []\n for _ in self.load_hist: # Loop over all possible workloads\n workload_allocation: List[float] = []\n for iclass in self.cooked.instances_res:\n i_allocation = sum(\n self.cooked.map_res[app, iclass].varValue\n for app in self.system.apps\n )\n workload_allocation.append(i_allocation)\n\n # The obtained allocation MUST be the same for any workload\n assert allocation == [] or allocation == workload_allocation\n allocation = workload_allocation\n\n return ReservedAllocation(\n instance_classes=tuple(self.cooked.instances_res),\n vms_number=tuple(allocation),\n )",
"def get_node_vcpus(self, name):\n #\n # Default to zero, because if for some reason the node can't be found\n # (i.e. it was deleted in the background), then it will not be using\n # any cpus\n #\n vcpus = 0\n\n try:\n configDict = self.get_node_resource_adapter_config(\n NodesDbHandler().getNode(self.session, name)\n )\n\n vcpus = configDict.get('vcpus', 0)\n if not vcpus:\n vcpus = self.get_instance_size_mapping(configDict['type'])\n\n except NodeNotFound:\n pass\n\n return vcpus",
"def resource_slots(self) -> int:",
"def test_get_total_allocatable_mem_cpu(self, setup_params):\n node_api_obj = setup_params[\"node_api_obj\"]\n total_allocatable_memory, total_allocatable_cpu = node_api_obj.get_total_allocatable_mem_cpu()\n if not total_allocatable_memory:\n assert False, \"Failed to get total allocatable memory from cluster\"\n assert isinstance(total_allocatable_memory, int)\n if not total_allocatable_cpu:\n assert False, \"Failed to get total allocatable cpu from cluster\"\n assert isinstance(total_allocatable_cpu, int)",
"def _updateCost(self):\n\n self._numNodes = len(self.multiplicity)\n\n numDiscrete = np.sum(self.multiplicity > 1)\n hasShared = np.any(self.multiplicity == 1)\n self._numAxons = numDiscrete + hasShared\n\n self._cost = self._numAxons / self._maxNumAxonCfgEntries",
"def consolidate_instance_vcpus(sample):\n name = \"instance_vcpus\"\n payload = sample[\"resource_metadata\"][\"flavor\"][\"vcpus\"]\n (address, sourcedict, timestamp) = get_core_triple(payload, sample, name)\n sourcedict[\"metric_unit\"] = \"vcpu\"\n return (address, sourcedict, timestamp, payload)",
"def capacity(self):\r\n if self.learning_rule == 'Hebbian':\r\n self._capacity = self.nbr_attractors / (2 * log(self.nbr_attractors))\r\n\r\n elif self.learning_rule == 'Storkey':\r\n self._capacity = self.nbr_attractors / (sqrt(2 * log(self.nbr_attractors)))\r\n\r\n print('Network\\'s capacity is {}'.format(round(self._capacity, 2)))",
"def _get_cpu_shares(self, instance):\n if isinstance(instance, objects.Instance):\n flavor = instance.get_flavor()\n else:\n flavor = flavors.extract_flavor(instance)\n return int(flavor['vcpus']) * 1024",
"def label_and_taint_nodes(self):\n\n # TODO: remove this \"heuristics\", it doesn't belong there, the process\n # should be explicit and simple, this is asking for trouble, bugs and\n # silently invalid deployments ...\n # See https://github.com/red-hat-storage/ocs-ci/issues/4470\n arbiter_deployment = config.DEPLOYMENT.get(\"arbiter_deployment\")\n\n nodes = ocp.OCP(kind=\"node\").get().get(\"items\", [])\n\n worker_nodes = [\n node\n for node in nodes\n if constants.WORKER_LABEL in node[\"metadata\"][\"labels\"]\n ]\n if not worker_nodes:\n raise UnavailableResourceException(\"No worker node found!\")\n az_worker_nodes = {}\n for node in worker_nodes:\n az = node[\"metadata\"][\"labels\"].get(constants.ZONE_LABEL)\n az_node_list = az_worker_nodes.get(az, [])\n az_node_list.append(node[\"metadata\"][\"name\"])\n az_worker_nodes[az] = az_node_list\n logger.debug(f\"Found the worker nodes in AZ: {az_worker_nodes}\")\n\n if arbiter_deployment:\n to_label = config.DEPLOYMENT.get(\"ocs_operator_nodes_to_label\", 4)\n else:\n to_label = config.DEPLOYMENT.get(\"ocs_operator_nodes_to_label\")\n\n distributed_worker_nodes = []\n if arbiter_deployment and config.DEPLOYMENT.get(\"arbiter_autodetect\"):\n for az in list(az_worker_nodes.keys()):\n az_node_list = az_worker_nodes.get(az)\n if az_node_list and len(az_node_list) > 1:\n node_names = az_node_list[:2]\n distributed_worker_nodes += node_names\n elif arbiter_deployment and not config.DEPLOYMENT.get(\"arbiter_autodetect\"):\n to_label_per_az = int(\n to_label / len(config.ENV_DATA.get(\"worker_availability_zones\"))\n )\n for az in list(config.ENV_DATA.get(\"worker_availability_zones\")):\n az_node_list = az_worker_nodes.get(az)\n if az_node_list and len(az_node_list) > 1:\n node_names = az_node_list[:to_label_per_az]\n distributed_worker_nodes += node_names\n else:\n raise UnavailableResourceException(\n \"Atleast 2 worker nodes required for arbiter cluster in zone %s\",\n az,\n )\n else:\n while az_worker_nodes:\n for az in list(az_worker_nodes.keys()):\n az_node_list = az_worker_nodes.get(az)\n if az_node_list:\n node_name = az_node_list.pop(0)\n distributed_worker_nodes.append(node_name)\n else:\n del az_worker_nodes[az]\n logger.info(f\"Distributed worker nodes for AZ: {distributed_worker_nodes}\")\n\n to_taint = config.DEPLOYMENT.get(\"ocs_operator_nodes_to_taint\", 0)\n\n distributed_worker_count = len(distributed_worker_nodes)\n if distributed_worker_count < to_label or distributed_worker_count < to_taint:\n logger.info(f\"All nodes: {nodes}\")\n logger.info(f\"Distributed worker nodes: {distributed_worker_nodes}\")\n raise UnavailableResourceException(\n f\"Not enough distributed worker nodes: {distributed_worker_count} to label: \"\n f\"{to_label} or taint: {to_taint}!\"\n )\n\n _ocp = ocp.OCP(kind=\"node\")\n workers_to_label = \" \".join(distributed_worker_nodes[:to_label])\n if workers_to_label:\n logger.info(\n f\"Label nodes: {workers_to_label} with label: \"\n f\"{constants.OPERATOR_NODE_LABEL}\"\n )\n label_cmds = [\n (\n f\"label nodes {workers_to_label} \"\n f\"{constants.OPERATOR_NODE_LABEL} --overwrite\"\n )\n ]\n if config.DEPLOYMENT.get(\"infra_nodes\") and not config.ENV_DATA.get(\n \"infra_replicas\"\n ):\n logger.info(\n f\"Label nodes: {workers_to_label} with label: \"\n f\"{constants.INFRA_NODE_LABEL}\"\n )\n label_cmds.append(\n f\"label nodes {workers_to_label} \"\n f\"{constants.INFRA_NODE_LABEL} --overwrite\"\n )\n\n for cmd in label_cmds:\n _ocp.exec_oc_cmd(command=cmd)\n\n workers_to_taint = \" 
\".join(distributed_worker_nodes[:to_taint])\n if workers_to_taint:\n logger.info(\n f\"Taint nodes: {workers_to_taint} with taint: \"\n f\"{constants.OPERATOR_NODE_TAINT}\"\n )\n taint_cmd = (\n f\"adm taint nodes {workers_to_taint} {constants.OPERATOR_NODE_TAINT}\"\n )\n _ocp.exec_oc_cmd(command=taint_cmd)",
"def compute_node_stats(self, ctxt):\n responses = self.msg_runner.compute_node_stats(ctxt)\n totals = {}\n for response in responses:\n data = response.value_or_raise()\n for key, val in data.iteritems():\n totals.setdefault(key, 0)\n totals[key] += val\n return totals",
"def cost_network(self):\n self.rail.cost_network()\n self.road.cost_network()",
"def allocations(self):\n max_clients = self.clients\n allocations = [None] * max_clients\n for client_index in range(max_clients):\n allocations[client_index] = []\n join_point_id = 0\n # start with an artificial join point to allow master to coordinate that all clients start at the same time\n next_join_point = JoinPoint(join_point_id)\n for client_index in range(max_clients):\n allocations[client_index].append(next_join_point)\n join_point_id += 1\n\n for task in self.schedule:\n start_client_index = 0\n clients_executing_completing_task = []\n any_task_completes_parent = []\n for sub_task in task:\n for client_index in range(start_client_index, start_client_index + sub_task.clients):\n # this is the actual client that will execute the task. It may differ from the logical one in case we over-commit (i.e.\n # more tasks than actually available clients)\n physical_client_index = client_index % max_clients\n if sub_task.completes_parent:\n clients_executing_completing_task.append(physical_client_index)\n elif sub_task.any_completes_parent:\n any_task_completes_parent.append(physical_client_index)\n\n ta = TaskAllocation(\n task=sub_task,\n client_index_in_task=client_index - start_client_index,\n global_client_index=client_index,\n # if task represents a parallel structure this is the total number of clients\n # executing sub-tasks concurrently.\n total_clients=task.clients,\n )\n allocations[physical_client_index].append(ta)\n start_client_index += sub_task.clients\n # uneven distribution between tasks and clients, e.g. there are 5 (parallel) tasks but only 2 clients. Then, one of them\n # executes three tasks, the other one only two. So we need to fill in a `None` for the second one.\n if start_client_index % max_clients > 0:\n # pin the index range to [0, max_clients). This simplifies the code below.\n start_client_index = start_client_index % max_clients\n for client_index in range(start_client_index, max_clients):\n allocations[client_index].append(None)\n\n # let all clients join after each task, then we go on\n next_join_point = JoinPoint(join_point_id, clients_executing_completing_task, any_task_completes_parent)\n for client_index in range(max_clients):\n allocations[client_index].append(next_join_point)\n join_point_id += 1\n return allocations"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set default value for location based on the selected warehouse
|
def default_get(self, fields):
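    # When a default warehouse is present, default location_id to that warehouse's stock location (lot_stock_id).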
    result = super(SaleOrder, self).default_get(fields)
    if 'warehouse_id' in result:
        warehouse_obj = self.env['stock.warehouse']
        result['location_id'] = warehouse_obj.browse(result['warehouse_id']).lot_stock_id.id
    return result
|
[
"def _onchange_warehouse_location_domain(self):\n\n location_obj = self.env['stock.location']\n location_id = self.warehouse_id.lot_stock_id # main warehouse location\n location_parent = location_id.location_id # location id is parent location n model stock.location\n\n self.location_id = location_id\n child_locations = location_obj.search([('id', 'child_of', location_parent.id), ('usage', '=', 'internal')])\n\n return {'domain': {'location_id': [('id', 'in', child_locations.ids), ('usage', '=', 'internal')]}}",
"def print_default_location_warning(_, args, request):\n if not (properties.VALUES.workflows.location.IsExplicitlySet() or\n args.IsSpecified(\"location\")):\n log.warning(\"The default location(us-central1) was used since the location \"\n \"flag was not specified.\")\n return request",
"def set_default_storage_location(cls, storage_location: str) -> None:\n if storage_location:\n storage_dict = {'storage_location': storage_location}\n cls.__save(storage_dict)",
"def getLocation():\n location=input(\"please input the location you want to look at : \")\n if not location:\n location = LOCATION\n return location",
"def default_storage_location(self) -> Optional[\"StorageLocation\"]:\n whens = [\n models.When(\n connector_name=connector_name,\n then=connectors[connector_name].priority,\n )\n for connector_name in connectors\n ]\n q_set_all = self.storage_locations.annotate(\n priority=models.Case(\n *whens,\n default=DEFAULT_CONNECTOR_PRIORITY,\n output_field=models.IntegerField(),\n )\n ).order_by(\"priority\")\n q_set_done = q_set_all.filter(status=StorageLocation.STATUS_DONE)\n return q_set_done.first() or q_set_all.first()",
"def set_location(self, v):\n self.location = v",
"def set_adm_location(self):\n adm_q = None\n municip_dic = self.data_files[\"municipalities\"]\n region_dict = self.data_files[\"regions\"]\n\n municip_q = utils.q_from_first_wikilink(\"es\", self.comuna)\n if utils.get_item_from_dict_by_key(dict_name=municip_dic,\n search_term=municip_q,\n search_in=\"item\"):\n adm_q = municip_q\n else:\n self.add_to_report(\"comuna\", self.comuna, \"located_adm\")\n\n if adm_q is None:\n iso_match = utils.get_item_from_dict_by_key(\n dict_name=region_dict,\n search_term=self.ISO,\n search_in=\"iso\")\n if len(iso_match) == 1:\n adm_q = iso_match[0]\n else:\n self.add_to_report(\"ISO\", self.ISO, \"located_adm\")\n\n if adm_q:\n self.add_statement(\"located_adm\", adm_q)",
"def get_default_location(self):\n address = input(\"Enter Default Location: \").replace(\" \", \"+\")\n if os.path.exists(self.user_data):\n with open(self.user_data) as infile:\n self.data = json.load(infile)\n self.data['add'] = str(address)\n with open(self.user_data, 'w') as outfile:\n json.dump(self.data, outfile)\n self.default_location = self.get_lat_log(address)\n self.prev_location = self.default_location\n #the dufalut starting location of the user can be entered for the first time or updated \n #from here. everytime an update is made, the json file storing the data locally is also updated.",
"def random_location(self):\n location_key = random.choice(list(self.data[\"capitalof\"]))\n if bool(random.getrandbits(1)):\n location = location_key\n else:\n location = self.data[\"capitalof\"][location_key]\n return location",
"def set_adm_location(self):\n match = None\n if self.has_non_empty_attribute(\"municipio\"):\n try_match = utils.q_from_first_wikilink(\"es\", self.municipio)\n link_match = utils.get_item_from_dict_by_key(\n dict_name=self.data_files[\"admin\"],\n search_term=try_match,\n search_in=\"item\")\n if len(link_match) == 1:\n match = link_match[0]\n else:\n self.add_to_report(\"municipio\", self.municipio, \"located_adm\")\n if not match:\n dep_match = utils.get_item_from_dict_by_key(\n dict_name=self.data_files[\"departments\"],\n search_term=self.iso,\n search_in=\"iso\")\n if len(dep_match) == 1:\n match = dep_match[0]\n else:\n self.add_to_report(\"iso\", self.iso, \"located_adm\")\n\n if match:\n self.add_statement(\"located_adm\", match)",
"def set_location(self, provider_title):\n\n provider = self.app.providers.get(provider_title)\n provider(self.app).config_location()",
"def choose_new_location(self, new_locations):\n\t\tpass",
"def setLogicalLocation(self, string: str) -> None:\n ...",
"def _create_region_from_default(self, region):\n options = self.default_region_options.copy()\n self.regions[region] = options\n self.bcm.regions.update({region: options})\n beaker.cache.cache_regions.update({region: options})",
"def add_to_warehouse(self, warehouse):\n if self.is_assigned():\n assignment = self.current_property()\n assignment.active = False\n assignment.save()\n wf = self.warehousefurniture_set.create(warehouse=warehouse)\n wf.save()\n return wf",
"def store_dataset_data_location(dataset: Dataset, location: Optional[Path]) -> None:\n section = \"dataset-locations\"\n key = dataset.name\n\n if not location:\n remove_value(section=section, key=key)\n else:\n set_value(section=section, key=key, value=get_absolute_path(location))",
"def location(self, location):\n if location is None:\n raise ValueError(\"Invalid value for `location`, must not be `None`\")\n\n self._location = location",
"def setDefault(key, value, context=None):",
"def getStockLocation(self):\n\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
In case the warehouse changes, we need to change the location to the default location of the newly selected warehouse, and also set the domain to the child locations of the newly selected warehouse
|
def _onchange_warehouse_location_domain(self):
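    # Triggered when warehouse_id changes: default the location and restrict its domain to the warehouse's internal child locations.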
    location_obj = self.env['stock.location']
    location_id = self.warehouse_id.lot_stock_id  # main warehouse stock location
    location_parent = location_id.location_id  # its parent location in model stock.location
    self.location_id = location_id
    child_locations = location_obj.search([('id', 'child_of', location_parent.id), ('usage', '=', 'internal')])
    return {'domain': {'location_id': [('id', 'in', child_locations.ids), ('usage', '=', 'internal')]}}
|
[
"def choose_new_location(self, new_locations):\n\t\tpass",
"def set_adm_location(self):\n match = None\n if self.has_non_empty_attribute(\"municipio\"):\n try_match = utils.q_from_first_wikilink(\"es\", self.municipio)\n link_match = utils.get_item_from_dict_by_key(\n dict_name=self.data_files[\"admin\"],\n search_term=try_match,\n search_in=\"item\")\n if len(link_match) == 1:\n match = link_match[0]\n else:\n self.add_to_report(\"municipio\", self.municipio, \"located_adm\")\n if not match:\n dep_match = utils.get_item_from_dict_by_key(\n dict_name=self.data_files[\"departments\"],\n search_term=self.iso,\n search_in=\"iso\")\n if len(dep_match) == 1:\n match = dep_match[0]\n else:\n self.add_to_report(\"iso\", self.iso, \"located_adm\")\n\n if match:\n self.add_statement(\"located_adm\", match)",
"def setLogicalLocation(self, string: str) -> None:\n ...",
"def edit_location(self):\n self.update()",
"def set_adm_location(self):\n adm_q = None\n municip_dic = self.data_files[\"municipalities\"]\n region_dict = self.data_files[\"regions\"]\n\n municip_q = utils.q_from_first_wikilink(\"es\", self.comuna)\n if utils.get_item_from_dict_by_key(dict_name=municip_dic,\n search_term=municip_q,\n search_in=\"item\"):\n adm_q = municip_q\n else:\n self.add_to_report(\"comuna\", self.comuna, \"located_adm\")\n\n if adm_q is None:\n iso_match = utils.get_item_from_dict_by_key(\n dict_name=region_dict,\n search_term=self.ISO,\n search_in=\"iso\")\n if len(iso_match) == 1:\n adm_q = iso_match[0]\n else:\n self.add_to_report(\"ISO\", self.ISO, \"located_adm\")\n\n if adm_q:\n self.add_statement(\"located_adm\", adm_q)",
"def _relocate(self, new_location):\n self._current_location = new_location\n if self._current_location.query_path in self._aliases:\n self._current_alias = self._aliases[self._current_location.query_path]\n else:\n self._current_alias = (\n self._sql_schema_info.vertex_name_to_table[self._current_classname].alias()\n )",
"def setlocation(self, location, tree):\n if location and location[-1] == \"/\":\n location = location[:-1]\n try:\n location, name = location.rsplit(\"/\", 1)\n except ValueError: # ... value to unpack\n raise ValueError(\"Location should not point to root node.\")\n node = self.locate(location)\n node[name] = tree",
"def moveTo(self, newParent: ghidra.framework.model.DomainFolder) -> ghidra.framework.model.DomainFolder:\n ...",
"def default_get(self, fields):\n\n result = super(SaleOrder, self).default_get(fields)\n if 'warehouse_id' in result:\n warehouse_obj = self.env['stock.warehouse']\n result['location_id'] = warehouse_obj.browse(result['warehouse_id']).lot_stock_id.id\n return result",
"def determine_new_master(self):\n self.master_host = determine_host_address()",
"def _set_home_origin():\n util.log_info('Setting HOME ORIGIN.')\n shared.home_origin = LocationGlobalRelative( 31.2991103, # simulated origin\n 121.4953190,\n 9 )\n util.log_info(\"HOME_ORIGIN: %s\" % shared.home_origin)",
"def __build_location_stuff( self, data_dict ):\n location = etree.SubElement( self.mods, self.MODS+'location' )\n location_physical_location = etree.SubElement( location, self.MODS+'physicalLocation' )\n location_physical_location.text = 'Bell Art Gallery'\n location_holdings_simple = etree.SubElement( location, self.MODS+'holdingSimple' )\n location_holdings_simple_copy_information = etree.SubElement( location_holdings_simple, self.MODS+'copyInformation' )\n location_holdings_simple_copy_information_shelf_locator = etree.SubElement( location_holdings_simple_copy_information, self.MODS+'shelfLocator' )\n location_holdings_simple_copy_information_shelf_locator.text = data_dict[ 'MEDIA::object_medium_name' ]",
"def update_artella_paths():\n\n return None",
"def make_location(**kwargs):\n loc_type_name = kwargs.pop('location_type')\n try:\n sql_location_type = LocationType.objects.get(\n domain=kwargs['domain'],\n name=loc_type_name,\n )\n except LocationType.DoesNotExist:\n msg = \"You can't create a location without a real location type\"\n raise LocationType.DoesNotExist(msg)\n kwargs['location_type'] = sql_location_type\n parent = kwargs.pop('parent', None)\n kwargs['parent'] = parent.sql_location if parent else None\n return SQLLocation(**kwargs)",
"def __link_place(self, parent, plat, plon):\n selector = SelectPlace(self.dbstate, self.uistate, [])\n place = selector.run()\n if place:\n self.select_fct.close()\n place.set_latitude(str(plat))\n place.set_longitude(str(plon))\n if parent:\n placeref = PlaceRef()\n placeref.ref = parent\n place.add_placeref(placeref)\n try:\n EditPlace(self.dbstate, self.uistate, [], place)\n self.add_marker(None, None, plat, plon, None, True, 0)\n except WindowActiveError:\n pass",
"def setExitLocation(self, location):\n if location == None:\n self.gateway.exitPortal = None\n elif location.getWorld() != self.world:\n raise IllegalArgumentException(\"Cannot set exit location to different world\")\n else:\n self.gateway.exitPortal = BlockPosition(location.getBlockX(), location.getBlockY(), location.getBlockZ())",
"def setup_domain(self):\n\t\tself.setup_data()\n\t\tself.setup_roles()\n\t\tself.setup_properties()\n\t\tself.set_values()\n\n\t\tif not int(frappe.defaults.get_defaults().setup_complete or 0):\n\t\t\t# if setup not complete, setup desktop etc.\n\t\t\tself.setup_sidebar_items()\n\t\t\tself.set_default_portal_role()\n\n\t\tif self.data.custom_fields:\n\t\t\tcreate_custom_fields(self.data.custom_fields)\n\n\t\tif self.data.on_setup:\n\t\t\t# custom on_setup method\n\t\t\tfrappe.get_attr(self.data.on_setup)()",
"def test_locations_correct(self):\n location = \"/var/www/foo/\"\n handle_servername = ServerName(**{\n \"domain\" : self.valid_domain,\n }\n )\n handle_location = Location(**{\n \"location\" : location\n }\n )\n handle_servername.locations = handle_location\n self.assertEqual(handle_servername.locations[location].location, location)\n del handle_location\n del handle_servername",
"def save(self):\n if not self.slug:\n self.slug = slugify(self.name) \n super(Location, self).save()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read configuration file from config.ini
|
def read_config():
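    # "cp" is assumed to be an alias for the standard configparser module (import configparser as cp).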
    config = cp.ConfigParser()
    config.read("config.ini")
    return config
|
[
"def _read_config():\n\n import configparser\n import os\n\n basepath = os.getcwd()\n prev = None\n while basepath != prev:\n prev = basepath\n path = os.path.join(basepath, 'uriconfig.ini')\n if os.path.exists(path):\n break\n basepath = os.path.split(basepath)[0]\n\n parser = configparser.ConfigParser()\n parser.read(path)\n return parser",
"def read_config_file(self):\n self.logger.debug('reading config file')\n path = Path(self.conversion_settings.working_directory, self._config_file)\n\n if path.is_file():\n self.read(path)\n self.logger.info(f'Data read from INI file is {self.__repr__()}')\n else:\n self.logger.warning(f'config.ini missing at {path}, generating new file and settings set to default.')\n if not config.silent:\n print(\"config.ini missing, generating new file.\")\n self.conversion_settings = self._default_quick_setting",
"def readConfig(self):\n ##Open ConfigFile\n self.config=ConfigObj(infile='sims/tcpwater/config', unrepr=True)",
"def __readConfig(self):\r\n\r\n\t\tfr = open(self.__configFilePath, 'r')\r\n\t\t\r\n\r\n\t\tfor line in fr.readlines():\r\n\t\t\tline = line.strip()\r\n\t\t\tif line == \"\":\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif line[0] != '#': # ignore lines start by #\r\n\t\t\t\tsp = line.split('=')\r\n\t\t\t\tif len(sp) == 2:\r\n\t\t\t\t\tkey = sp[0].strip()\r\n\t\t\t\t\tval = sp[1].strip()\r\n\t\t\t\t\tself.__configDict[key] = val\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.__print(\"Ignore config line: \" + line)\r\n\r\n\t\tself.__print(\"Read configs from: %s\\n%d configs read!\" \\\r\n\t\t\t\t\t\t\t\t % (self.__configFilePath, len(self.__configDict)) \\\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\tfr.close()",
"def open_ini(self, file_name):\r\n config = ConfigParser.ConfigParser()\r\n file_pointer = open(file_name)\r\n config.readfp(file_pointer, file_name)\r\n return config",
"def config_reader(conf_path):\n config = configparser.ConfigParser()\n config.read(conf_path)\n return config",
"def read_config():\n try:\n config_file = open(CONFIG_LOCATION)\n except IOError:\n print_error('Could not open the configuration file.')\n raise\n try:\n settings = json.loads(config_file.read())\n except ValueError:\n print_error('Could not parse the configuration file.')\n raise\n return settings",
"def load_cfg(self,filepath):\n config = configparser.ConfigParser()\n config.read([filepath])\n return config",
"def read_config(path_to_ini: Optional[str] = None):\n if path_to_ini is None:\n path_to_ini = os.path.join(ROOT_DIR, \"pycd.ini\")\n\n config = configparser.ConfigParser()\n config.read(path_to_ini) # Read file\n\n if \"branches\" not in config.sections():\n raise NotCompleteSetupFile(\n \"No `branches` section in {ini}\".format(ini=path_to_ini)\n )\n\n if \"directories\" not in config.sections():\n raise NotCompleteSetupFile(\n \"No `directories` section in {ini}\".format(ini=path_to_ini)\n )\n\n if \"git\" not in config.sections():\n raise NotCompleteSetupFile(\"No `git` section in {ini}\".format(ini=path_to_ini))\n\n dev_branch = config[\"branches\"].get(\"dev\")\n master_branch = config[\"branches\"].get(\"master\")\n\n dev_path = config[\"directories\"].get(\"dev\")\n master_path = config[\"directories\"].get(\"master\")\n\n git_url = config[\"git\"].get(\"url\")\n\n return {\n \"directories\": {\"dev\": dev_path, \"master\": master_path},\n \"branches\": {\"dev\": dev_branch, \"master\": master_branch},\n \"git\": {\"url\": git_url},\n }",
"def read_config_file():\n \n MIN_RUN_TIME = 300 # min five minutes between runs\n \n config = configparser.ConfigParser(allow_no_value=True)\n configdata = {}\n \n config.read('backgrounder.ini')\n \n configdata['path'] = {}\n configdata['path']['image'] = config['path']['image']\n configdata['subreddits'] = config['subreddits']['subreddits']\n configdata['postsave'] = config['postsave']['method']\n configdata['timing'] = config['timing']['seconds']\n configdata['other'] = {}\n configdata['other']['ignore_duplicates'] = config['other']['ignore_duplicates']\n configdata['other']['download_gallery'] = config['other']['download_gallery']\n \n # validate user-entered config\n valid_dict = validate_config(configdata)\n for key, val in valid_dict.items():\n if val is False:\n messagebox.showinfo('Warning', 'There was an error reading backgrounder.ini.\\n\\nPlease delete your data.pkl file and rerun the program.'\n % (key))\n return None\n \n process_configdata(configdata)\n \n return configdata",
"def read_config(self):\n path = pathlib.Path.home() / '.config' / 'ccmk' / 'config.yaml'\n if not path.exists():\n msg = (f\"The configuration file storing your checkmk credentials \"\n f\"- {path} - does not exist. Please create it.\")\n raise RuntimeError(msg)\n with path.open(mode='rt') as f:\n self.config = yaml.load(f)",
"def read_config_file(self):\n\n # try to import the python2 ConfigParser\n # if unable to import, then try to import the python3 configparser\n try:\n import ConfigParser as configparser\n except ImportError:\n import configparser\n\n config = configparser.ConfigParser()\n\n try:\n config.read(self.config)\n except EnvironmentError:\n sys.exit(\"Unable to read from the config file: \" + self.config)\n\n for section in config.sections():\n config_list = config.items(section)\n for name,value in config_list:\n yield name, value",
"def init_config_file():\n\n print('setting up config.ini file.')\n copyfile(CONFIG_TEMPLATE_PATH, CONFIG_FILE_PATH)\n config.readfp(open(CONFIG_FILE_PATH))",
"def read_config_file():\n config = ConfigParser.ConfigParser()\n config.read(FLAGS.config_file)\n sentiment_section = \"sentiment_network_params\"\n general_section = \"general\"\n dic = {\n \"num_layers\": config.getint(sentiment_section, \"num_layers\"),\n \"hidden_size\": config.getint(sentiment_section, \"hidden_size\"),\n \"dropout\": config.getfloat(sentiment_section, \"dropout\"),\n \"batch_size\": config.getint(sentiment_section, \"batch_size\"),\n \"train_frac\": config.getfloat(sentiment_section, \"train_frac\"),\n \"learning_rate\": config.getfloat(sentiment_section, \"learning_rate\"),\n \"lr_decay_factor\": config.getfloat(\n sentiment_section, \"lr_decay_factor\"),\n \"grad_clip\": config.getint(sentiment_section, \"grad_clip\"),\n \"use_config_file_if_checkpoint_exists\": config.getboolean(\n general_section, \"use_config_file_if_checkpoint_exists\"),\n \"max_epoch\": config.getint(sentiment_section, \"max_epoch\"),\n \"max_vocab_size\": config.getint(sentiment_section, \"max_vocab_size\"),\n \"max_seq_length\": config.getint(general_section, \"max_seq_length\"),\n \"steps_per_checkpoint\": config.getint(\n general_section, \"steps_per_checkpoint\")\n }\n return dic",
"def read_config(self, config=\"~/.cloudfuse\"):\n config = os.path.expanduser(config)\n try:\n fd = open(config, \"r\")\n except IOError:\n logging.warning(\"Failed to read config file %r\" % config)\n return\n try:\n for line in fd:\n try:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n key, value = line.split(\"=\", 1)\n key = key.strip()\n value = value.strip()\n if key not in self.CONFIG_KEYS:\n logging.warning(\"Ignoring unknown config key %r\" % key)\n continue\n if key in self.INT_CONFIG_KEYS:\n key = int(key)\n if key in self.BOOL_CONFIG_KEYS:\n key = value == \"true\"\n logging.debug(\"setting %r = %r from %r\" % (key, value, config))\n setattr(self, key, value)\n except ValueError:\n logging.warning(\"Ignoring bad line in %r: %r\" % (config, line))\n continue\n finally:\n fd.close()",
"def _read_config(fin, silent=False):\n \n # Global parameters to be edited\n global _CONFIG, _BLACKLIST, _TYPO_DELETE_SPACE, _TYPO_DELETE_CHAR\n global _TYPO_SWAP, _TYPO_INSERT, _TYPO_REPLACE\n global _PHONO_DELETE, _PHONO_INSERT, _PHONO_REPLACE, _PHONO_GROUP\n\n # Generate default config if it does not exist\n if pathlib.Path(_DEF_CONFIG).exists() == False:\n _default_config(silent=silent)\n \n # Validate input\n if type(fin) != str and fin != None:\n return None\n\n # Do nothing if input is None\n if fin == None:\n return None\n\n # Do nothing if selected file has already been loaded\n if fin == _CONFIG:\n return None\n\n # Regenerate default config\n if fin == _DEF_CONFIG:\n _CONFIG = _DEF_CONFIG\n return _default_config(silent=silent)\n \n # Read INI file and set (or reset) parameters\n if silent == False:\n print(\"Reading config file '\" + fin + \"' ...\")\n \n # Initialize config parser\n config = configparser.ConfigParser(allow_no_value=True)\n\n # Verify that config file exists\n if pathlib.Path(fin).exists() == False:\n if silent == False:\n print(\"Config file '\" + fin + \"' not found.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n\n # Read config file\n config.read(fin)\n \n # Read typographical section\n try:\n key = \"delete_space\"\n _TYPO_DELETE_SPACE = float(config[\"typo\"][key])\n key = \"delete_char\"\n _TYPO_DELETE_CHAR = float(config[\"typo\"][key])\n key = \"swap\"\n _TYPO_SWAP = float(config[\"typo\"][key])\n key = \"insert\"\n _TYPO_INSERT = float(config[\"typo\"][key])\n key = \"replace\"\n _TYPO_REPLACE = float(config[\"typo\"][key])\n except KeyError:\n if silent == False:\n print(\"Key '\" + key + \"' from 'typo' section not found in '\" +\n fin + \"'.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n except ValueError:\n if silent == False:\n print(\"Key '\" + key + \"' from 'typo' section in '\" + fin +\n \"' should be a number.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n\n # Validate all typographical parameters as probabilities on [0.0,1.0]\n valid = True\n if _TYPO_DELETE_SPACE < 0 or _TYPO_DELETE_SPACE > 1:\n valid = False\n if _TYPO_DELETE_CHAR < 0 or _TYPO_DELETE_CHAR > 1:\n valid = False\n if _TYPO_SWAP < 0 or _TYPO_SWAP > 1:\n valid = False\n if _TYPO_INSERT < 0 or _TYPO_INSERT > 1:\n valid = False\n if _TYPO_REPLACE < 0 or _TYPO_REPLACE > 1:\n valid = False\n if _TYPO_DELETE_CHAR + _TYPO_INSERT + _TYPO_REPLACE > 1:\n valid = False\n if valid == False:\n if silent == False:\n print(\"Invalid 'typo' parameter read in '\" + fin + \"'.\")\n print(\"All parameters should be probabilities between 0.0 and \" +\n \"1.0.\")\n print(\"The sum of 'delete_char', 'insert', and 'replace' should \" +\n \"not exceed 1.0.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n \n # Read phonological section\n try:\n key = \"delete\"\n _PHONO_DELETE = float(config[\"phono\"][key])\n key = \"insert\"\n _PHONO_INSERT = float(config[\"phono\"][key])\n key = \"replace\"\n _PHONO_REPLACE = float(config[\"phono\"][key])\n key = \"group\"\n _PHONO_GROUP = float(config[\"phono\"][key])\n except KeyError:\n if silent == False:\n print(\"Key '\" + key + \"' from 'phono' section not found in '\" +\n fin + \"'.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n except ValueError:\n if silent == False:\n print(\"Key '\" + key + \"' from 'phono' section in '\" + fin +\n \"' 
should be a number.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n\n # Validate all phonological parameters as probabilities on [0.0,1.0]\n valid = True\n if _PHONO_DELETE < 0 or _PHONO_DELETE > 1:\n valid = False\n if _PHONO_INSERT < 0 or _PHONO_INSERT > 1:\n valid = False\n if _PHONO_REPLACE < 0 or _PHONO_REPLACE > 1:\n valid = False\n if _PHONO_GROUP < 0 or _PHONO_GROUP > 1:\n valid = False\n if _PHONO_DELETE + _PHONO_INSERT + _PHONO_REPLACE > 1:\n valid = False\n if valid == False:\n if silent == False:\n print(\"Invalid 'phono' parameter read in '\" + fin + \"'.\")\n print(\"All parameters should be probabilities between 0.0 and \" +\n \"1.0.\")\n print(\"The sum of 'delete', 'insert', and 'replace' should \" +\n \"not exceed 1.0.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n\n # Read blacklist (section not required)\n if \"blacklist\" in config.sections():\n _BLACKLIST = tuple(dict(config.items(\"blacklist\")))\n else:\n _BLACKLIST = _DEF_BLACKLIST\n \n if silent == False:\n print(\"Config file successfully loaded!\")\n\n # Update current config file\n _CONFIG = fin",
"def _read_config(self):\n \n config_file_name = \"config.json\"\n \n if not os.path.isfile(config_file_name):\n raise FileNotFoundError(config_file_name + \" is not found\")\n\n try:\n data = json.load(open(config_file_name))\n except:\n raise ValueError(config_file_name + \" is not a valid json\")\n\n self.queries = self._read_config_value(data, \"query\")\n self.sender = self._read_config_value(data, \"sender\")\n self.recipients = self._read_config_value(data, \"recipient\")",
"def loadConfig(self):\n logging.debug('Reading config file')\n self.config = configparser.ConfigParser()\n self.config.read(\"user/config.ini\")\n\n # Grabbing the variable values as specified in the config file\n self.db_path = self.config.get(\"Data Sources\", \"DB_path\") #\"Data Sources\" refers to the section\n logging.debug(\"DB Path: \"+self.db_path)\n self.watch_path = self.config.get(\"Watch Paths\", \"path_001\")\n self.last_check_watched = self.config.get(\"Other Variables\", \"last_check\")\n self.all_bib_path = self.config.get(\"Bib\", \"all_bib_path\")\n\n # Open the associated arda database\n self.adb = ArDa_DB_SQL()\n self.adb.open_db(self.db_path)",
"def load_ini_config(filename, key=None):\n config = configparser.ConfigParser()\n config.read(filename)\n return _config_helper(config, key)",
"def read_ini():\n try:\n conf = ConfigParser.ConfigParser()\n filename = os.path.join(os.path.dirname(__file__), 'short.ini')\n conf.readfp(open(filename))\n apikey = conf.get('bitly', 'api_key')\n return apikey\n\n except IOError, e:\n print \"Sorry, couldn't read the short.ini file, %s\" % e"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read command line arguments
|
def read_args():
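    # -f is a boolean flag (store_true); the parsed namespace is returned as a plain dict.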
    parser = argparse.ArgumentParser(description='Taiko data analysis toolkit')
    parser.add_argument('-f', help='Write frames', action='store_true')
    return vars(parser.parse_args())
|
[
"def command_line_arguments():\n return sys.argv",
"def main():\n # set up the program to take in arguments from the command line",
"def read_args(self):\n cmd = []\n for index in sys.argv:\n cmd = cmd + index.split(\"=\")\n cmd.pop(0)\n\n\n for index , item in enumerate(cmd):\n if (index % 2 == 0):\n found = False\n \n if ('--help' == item):\n found = True\n if self.legacy == True:\n print(( self.m_help ))\n raise RuntimeError\n \n for flags in self.m_flags:\n if (item == flags): \n\n found = True\n self.m_commands[flags] = cmd[index+1] \n \n \n \n if not found:\n raise RuntimeError\n # ^^ raise an exception if any bad flag is found instead ^^\n # self.m_errors =True\n # self.m_bad_flags.append(item)",
"def read_inputs():\n\n parser = ArgumentParser(description='Manage mesh_sphere command line arguments')\n\n\n parser.add_argument('-rec', '--recursions', dest='rec', type=int, default=None,\n help=\"number of recursions for unit sphere\")\n\n parser.add_argument('-r', '--radius', dest='r', type=float, default=None,\n help=\"radius of the sphere\")\n\n parser.add_argument('-x0', '--x_center', dest='x0', type=float, default=None,\n help=\"x coordinate of the center of sphere\")\n\n parser.add_argument('-y0', '--y_center', dest='y0', type=float, default=None,\n help=\"y coordinate of the center of sphere\")\n\n parser.add_argument('-z0', '--z_center', dest='z0', type=float, default=None,\n help=\"z coordinate of the center of sphere\")\n\n parser.add_argument('-n', '--name', dest='name', type=str, default=None,\n help=\"output file name\")\n\n return parser.parse_args()",
"def main(args):\r\n\tprint args",
"def parse_args():\n parser = argparse.ArgumentParser(description='Create PDB full region table and import new data')\n parser.add_argument('-f', '--file', help='Text file with data to import to pdb_full_region_temp', required=True)\n parser.add_argument('-db', '--database', help='Specify which database config values to use ', required=False)\n parser.add_argument('-nqc', '--no_quality_control_checks',\n help='Whether to execute QC checks ', action='store_true', required=False)\n return parser.parse_args()",
"def read_args(self,filename,varnames):\n for name in varnames:\n self.args[name]=ebf.read(filename,'/'+name)",
"def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', help='input JSON file', required=True)\n parser.add_argument('-o', '--output', help='ouput JSON file', required=True)\n #adding debug argument for parsing\n parser.add_argument('-d', '--debug', help='debug level',\n default='0', choices=('0', '1', '2', '3'))\n\n return parser.parse_args()",
"def parse_command_line_args():\n\n ap = argparse.ArgumentParser(description='Command line converter from input_file.m3u8 to output_file.mp3',\n epilog=\"That's all folks\")\n ap.add_argument(\"-i\", \"--input\", required=False,\n help=\"path to input m3u8 file to be converted\")\n ap.add_argument(\"-o\", \"--output\", required=False,\n help=\"path to output mp3 file\")\n args = vars(ap.parse_args())\n\n input_filepath = args[\"input\"]\n output_filepath = args[\"output\"]\n\n return input_filepath, output_filepath",
"def read_args():\n rtype_dict = {'P':'PP', 'R':'BR', 'S':'SP'}\n\n rindex = int(sys.argv[1])\n try:\n request_type = rtype_dict[sys.argv[2]]\n except (IndexError, KeyError):\n logging.warning(\"Missing or invalid request type. Default request type SP assumed.\")\n request_type = 'SP'\n\n return rindex, request_type",
"def _parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"experiment_config\",\n type=str,\n help='experiment json file (\\'{\"dataset\":\"EmnistDataset\"}\\'',\n )\n args = parser.parse_args()\n return args",
"def readCommand( argv ):\r\n\tfrom optparse import OptionParser\r\n\tusageStr = \"\"\"\r\n\tUSAGE: python sudokusolver.py <options>\r\n\tEXAMPLES: (1) python sudokusolver.py -p my_puzzle.txt -s sa -a T0=0.5,DR=0.9999,maxIter=100000\r\n\t\"\"\"\r\n\tparser = OptionParser(usageStr)\r\n\tparser.add_option('-p', '--puzzle', dest='puzzle', help=default('the puzzle filename'), default=None)\r\n\tparser.add_option('-s', '--solver', dest='solver', help=default('name of the solver (sa or ga)'), default='sa')\r\n\tparser.add_option('-a', '--solverParams', dest='solverParams', help=default('Comma separated pairs parameter=value to the solver. e.g. (for sa): \"T0=0.5,DR=0.9999,nIter=100000\"'))\r\n\t\r\n\toptions, otherjunk = parser.parse_args(argv)\r\n\tif len(otherjunk) != 0:\r\n\t\traise Exception('Command line input not understood: ' + str(otherjunk))\r\n\targs = dict()\r\n\t\r\n\tfd = open(options.puzzle,\"r+\") # Read the Puzzle file\r\n\tpuzzle = eval(fd.readline())\r\n\tarray = []\r\n\tfor row in puzzle:\r\n\t\tfor col in row:\r\n\t\t\tarray.append(col)\r\n\targs['puzzle'] = np.array(array) # puzzle es un vector con todas las filas del puzzle concatenadas (vacios tiene valor 0) \r\n\targs['solver'] = options.solver\r\n\targs['solverParams'] = options.solverParams\r\n\treturn args",
"def parse_arguments():\n description = (\"Create puzzle pieces from input image by random shuffling.\\n\"\n \"Maximum possible rectangle is cropped from original image.\")\n\n\n parser = argparse.ArgumentParser(description=description,\n formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument(\"source\",\n type=str,\n help=\"Path to the input file.\")\n \n parser.add_argument(\"--destination\", \"-d\",\n type=str,\n default=DEFAULT_OUTPUT_DEST,\n help=\"Path to the output file.\")\n\n parser.add_argument(\"--rows\", \"-r\",\n type=int,\n default=DEFAULT_PUZZLE_HEIGHT,\n help=\"Number of rows in the puzzle\")\n\n parser.add_argument(\"--cols\", \"-c\",\n type=int,\n default=DEFAULT_PUZZLE_WIDTH,\n help=\"Number of columns in the puzzle\")\n\n return parser.parse_args()",
"def load_args(base_dir):\n parser = argparse.ArgumentParser()\n args = parser.parse_args(args=[])\n with open(os.path.join(base_dir, \"args.txt\"), \"r\") as f:\n args.__dict__ = json.load(f)\n return args",
"def read_command_line():\n global advanced\n global add_all_variable_names\n\n try:\n options, arguments = getopt.getopt(sys.argv[1:], 'hd:')\n except getopt.GetoptError:\n print_usage()\n print('ERROR: Syntax Error with command!')\n raise SystemExit(22)\n\n command_info = {'source': '', 'model': '', 'location': '', \\\n 'start_time': '', 'variable_names': []}\n for option, argument in options:\n if option == '-h':\n print_usage()\n raise SystemExit(0)\n elif option == '-d':\n add_all_variable_names = False\n advanced = True\n command_info['variable_names'] = argument.split(',')\n\n read_command_info(arguments, command_info)\n\n return command_info",
"def args():\n return []",
"def readCommandLine(self, *args):\r\n return _osgDB.Registry_readCommandLine(self, *args)",
"def read_cmd_line() -> str:\n msg = \"distribute_jobs.py -i input.yml\"\n\n parser = argparse.ArgumentParser(description=msg)\n parser.add_argument('-i', required=True,\n help=\"Input file in YAML format\")\n\n args = parser.parse_args()\n return args.i",
"def parse_args():\n in_file = None\n out_file = None\n try:\n in_file = sys.argv[1]\n out_file = sys.argv[2]\n except IndexError:\n pass\n\n print(f\"{in_file}, {out_file}\")\n return in_file, out_file",
"def arguments():\n if len(sys.argv) == 3:\n cov = 50\n iden = 25\n\n elif len(sys.argv) == 5:\n if sys.argv[3].isnumeric() and sys.argv[4].isnumeric():\n cov = sys.argv[3]\n iden = sys.argv[4]\n else:\n print(\"Error. Coverage cut-off and identity cut-off must be numbers.\")\n help_msg()\n sys.exit()\n\n else:\n print(\"Error. Incorrect number of arguments.\")\n help_msg()\n sys.exit() \n\n return (cov, iden)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the next auction task if a task with auction time == current time step exists in the unallocated tasks
|
def _next_auction_task(self, time_step: int) -> Optional[Task]:
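    # The head of _unallocated_tasks is assumed to be the earliest-auctioned task; return it when its auction_time equals the current time step, otherwise None.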
    assert time_step >= 0
    if self._unallocated_tasks:
        assert self._unallocated_tasks[0].auction_time >= time_step, \
            f'Top unallocated task auction time {self._unallocated_tasks[0].auction_time} at time step: {time_step}'
        return self._unallocated_tasks.pop(0) if self._unallocated_tasks[0].auction_time == time_step else None
|
[
"def get_next_task(self, tasks):\n if len(tasks) == 0:\n return None\n else:\n schedulable =[]\n for task in tasks:\n if task[\"state\"] == \"init\":\n schedulable.append(task)\n elif task[\"state\"] == \"waiting\":\n schedule = True\n for task_id in task[\"waiting_for\"]:\n #Search for task in schedule\n #TODO: Create an index\n for t in tasks:\n if task_id == t[\"task_id\"]:\n if t[\"state\"] != \"complete\":\n schedule = False\n break\n if schedule == True:\n schedulable.append(task)\n #After the tasks complete, then the iteration needs to be \n #increased before this task is resumed \n #TODO: Specific to generate tasks\n #Move elsewhere?\n task['iteration']+=1\n task['state'] = 'running'\n task['waiting_for'] = [];\n task = None\n #TODO: Sort schedulable based on task id\n if len(schedulable) > 0:\n schedulable.sort(key=operator.itemgetter('task_id'))\n task = schedulable[0]\n task['state'] = 'running'\n task['waiting_for'] = [];\n return task",
"def nextTask(self):\n if len(self.Schedule) == 0:\n return None\n elif datetime.now() >= self.Schedule[0][2]:\n task = self.Schedule[0][0]\n param = self.Schedule[0][1]\n self.Schedule.pop(0)\n return (task, param)\n else:\n return None",
"def get_next(self):\n if len(self.tasks) == 0:\n return None\n else:\n task = self.get_next_task(self.tasks)\n return task",
"def get_next_task():\n tasks = get_tasks()\n _clear_dead_jobs()\n selected_task = None\n for tid in tasks:\n task = get_task(tid)\n if not task[\"bg\"] and task[\"status\"] == \"running\":\n selected_task = tid\n break\n if selected_task is None:\n return\n tasks.remove(selected_task)\n tasks.appendleft(selected_task)\n return get_task(selected_task)",
"def next(self, task):\n uuid = str(task.uuid)\n for idx, otask in enumerate(self.tasks[:-1]):\n if otask.uuid == uuid:\n if self.tasks[idx + 1].status != 'SUCCESS':\n return self.tasks[idx + 1]\n else:\n uuid = self.tasks[idx + 1].uuid",
"def current_task(self):\n dayFinished = True\n\n for idx, task in enumerate(self.todayTasks):\n if self.time_between(task['startTime'], task['endTime']):\n timeIndex = idx\n task = self.todayTasks[timeIndex]\n dayFinished = False\n break\n \n if dayFinished:\n print(\"You do not have any other tasks today :) \")\n sys.exit()\n \n return task",
"def queue_next():\n #CD.objects.filter(state='Q').order_by('qtime').first() # django 1.6 feature\n try:\n return models.CD.objects.filter(state='Q').order_by('qtime')[0]\n except IndexError:\n return None",
"def _get_task_item(self, name, machine):\n if name in self.tasks:\n for task in self.tasks[name]:\n if task['machine'] == machine:\n return task\n return None",
"def next_tasks(self):\n due_date_list = self.task_set.exclude(due_date=None).values('due_date') # min() returns a dict, pulling the datetime value out\n if not due_date_list:\n return due_date_list\n next_due_date = min(due_date_list)['due_date'] \n return self.task_set.filter(due_date=next_due_date)",
"def fetch_next(self):\n if self.rate_limit['remaining'] <= 0:\n print('Rate Limit exhausted. Waiting until', self.rate_limit['reset_date'], 'seconds left:', self.rate_limit['time_left'])\n interval = self.rate_limit['time_left']\n else:\n priority, q_insertion_num, github_path = self.queue.get()\n\n # Spawn a thread to download the GitHub data for the item and store it in the database\n self.Downloader(self, github_path, priority).start()\n\n # set timer for getting the next task.\n # keep q_insertion_num the same to keep sort order\n next_task = self.queue.get()\n next_priority = next_task[0]\n self.queue.put(next_task)\n\n if next_priority == self.priority_uncached:\n interval = self.interval_uncached\n elif next_priority == self.priority_user_requested:\n interval = self.interval_user_requested\n else:\n interval = self.interval_normal\n\n self.fetch_timer = DaemonTimer(interval, self.fetch_next)\n self.fetch_timer.start()",
"def popNextJob(self, t):\n laterJobs = [(i, job) for (i,job) in enumerate(self.jobs) if job.releaseTime >= t]\n\n if len(laterJobs) == 0:\n return None\n\n self._updateActivePriorities()\n\n laterJobs.sort(key = lambda x: (x[1].releaseTime, self.activePriorities[x[1]], x[1].task.id))\n return self.jobs.pop(laterJobs[0][0]) # get the index from the tuple in the 0th position",
"def peek_next(self):\n return self.schedule[0]",
"def find_voting_task_by_source_tasks(self, source_task_one, source_task_two):\n # Ensure the index is built\n self.current_voting_tasks()\n ordered_tasks = [source_task_one, source_task_two]\n ordered_tasks.sort(key=lambda t: t.id)\n if ordered_tasks[0].id not in self._voting_task_index:\n return None\n if ordered_tasks[1].id not in self._voting_task_index[ordered_tasks[0].id]:\n return None\n return self._voting_task_index[ordered_tasks[0].id][ordered_tasks[1].id]",
"def next(self, keepout = 0):\n last_check = None\n while len(self.to_fetch):\n uri = self.to_fetch.pop(0)\n\n # Break if we can't check any of these... shit!\n if last_check and last_check == uri:\n self.to_fetch.append(uri)\n break\n\n # Check if we're still behind the keepout\n if time.time() < self.last_fetched[uri] + keepout:\n # Re-enqueue this guy\n self.to_fetch.append(uri)\n last_check = uri\n continue\n\n self.queued.remove(uri)\n self.mark_fetched(uri)\n return uri\n\n raise Exception('Empty')",
"def next_scheduled_network_packet(self, current_time):\n if self._scheduled_network_packets and self._scheduled_network_packets[0][0] <= current_time:\n scheduled_network_packet = self._scheduled_network_packets.pop(0)\n unique_id = scheduled_network_packet[1]\n network_packet_json_string = scheduled_network_packet[2]\n return unique_id, network_packet_json_string\n\n return None, None",
"def get_next_unit(self):\n\n available_controllers = self.get_available_controllers()\n free_units = dict(filter(\n lambda x: x[0].assign_next_at == self.time_slot, available_controllers.items()))\n # print(type(free_units))\n sorted_units = sorted(free_units,\n key=lambda x: len(free_units[x]))\n # print(\"Sorted Units: \", sorted_units)\n return sorted_units[0]",
"def next_task(self):\n # print(self.stack)\n if self.stack == []:\n return False\n task = self.stack.pop()\n # Execute task and push back to stack if not completed\n if not task.execute_task():\n self.stack.append(task)\n return True",
"def get_next_point(self, t): \n assert ('t' in self.mode)\n t_index = self.mode.index('t')\n res = self.first()\n while (res[t_index] < t and not self.is_empty()):\n self.pop()\n res = self.first()\n\n if self.is_empty():\n return None\n return res",
"def time_until_next_repeat(self):\r\n task = self.ndb._task\r\n if task:\r\n try:\r\n return int(round(task.next_call_time()))\r\n except TypeError:\r\n pass\r\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads in a CSV file containing 40kb bins. For now, assumes there is only one type of bin list per file
|
def readBins(filename):
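    # Expected layout: a single-cell 'chrN' row names the chromosome, followed by a row of integer bin positions stored under that chromosome number.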
    reader = csv.reader(open(filename, 'rU'))
    chr_num = 0
    bins = {}
    for row in reader:
        if len(row) == 1:
            if row[0][0:3] == 'chr':
                chr_num = int(row[0].lstrip('chr'))
        if len(row) > 1:
            assert chr_num not in bins.keys()
            bins[chr_num] = [int(x) for x in row]
    return bins
|
[
"def _get_bin_count(self, file):\n bins = 0\n\n with open(file, 'r', encoding = self.encoding) as f:\n for line in f:\n try:\n if float(line.split(',')[0]):\n bins = bins + 1\n except: pass\n\n return bins",
"def read_histogram(hist_file):\n import csv\n with open(hist_file, \"r\") as hist_handle:\n hist_reader = csv.reader(hist_handle)\n total, binsize = hist_reader.next()\n total = int(total)\n hist = dict()\n for row in hist_reader:\n r,g,b,count = row\n hist[(int(r),int(g),int(b))] = int(count)\n return (total, binsize, hist)",
"def loads(self, rows):\n binnedRows = []\n for row in rows:\n if isinstance(row, GenePred):\n row = row.getRow()\n bin = rangeFinder.calcBin(int(row[3]), int(row[4]))\n binnedRows.append((bin,) + tuple(row))\n self.loadsWithBin(binnedRows)",
"def load_data_bin(filePath):\n \n dataFile = open(filePath)\n \n data = []\n labels = []\n for sample in dataFile:\n fields = sample.strip('\\n').split('\\t')\n fields = [int(x) for x in fields] \n labels.append(fields[0])\n data.append(fields[1:])\n dataFile.close()\n return data, labels",
"def get_number_of_bins_and_cutoff(filename, cutoff_col_index):\n LIST_IN = open(filename, 'r') \n number_of_bins = 0\n cutoff = 0\n offset = 0\n for line in LIST_IN:\n if line[0:1] != \"#\":\n NewRow = (line.strip()).split()\n if len(NewRow)>2:\n if number_of_bins == 0:\n offset = float(NewRow[cutoff_col_index])\n number_of_bins += 1\n cutoff = float(NewRow[cutoff_col_index])\n LIST_IN.close()\n \n return number_of_bins, cutoff, offset",
"def read_big_puzzle(input_file):\n puzzle = []\n infile = csv.reader(open(input_file), delimiter=',')\n\n for row in infile:\n puzzle.append([int(entry) for entry in row])\n\n return puzzle",
"def load_data(data_file=None):\n\n # Set defaults.\n if data_file is None:\n data_file = 'bin_seq.csv'\n\n bin_array = np.genfromtxt(data_file, delimiter=',')\n\n return(bin_array)",
"def import_from_csv(self, csv_file):\n reader = csv.reader(csv_file)\n\n self.variable_labels = next(reader, None)[1:]\n self.element_labels = []\n self.data = []\n\n data_mode = True\n for row in reader:\n if not any(row):\n if data_mode:\n data_mode = False\n continue\n else:\n if data_mode:\n self.element_labels.append(row[0])\n self.data.append([int(i) for i in row[1:]])\n else:\n self.weights = [int(i) for i in row[1:]]\n self.neg_min = [int(i) for i in next(reader, None)[1:]]\n self.pos_max = [int(i) for i in next(reader, None)[1:]]\n break",
"def load(infile):\n with open(infile, 'rU') as inf:\n return [line for line in csv.reader(inf)][1:]",
"def read_ballistic_hop_csv(bhop_csv):\n return pd.read_csv(bhop_csv, header=0, index_col=0)",
"def read_coupling_csv(path):\n with open(path) as csv:\n next(csv) # skip first line\n file_couplings = []\n for line in csv:\n entity, coupled, degree, avg_revs = line.split(',')\n fc = FileCoupling(entity, coupled, degree, avg_revs)\n file_couplings.append(fc)\n return file_couplings",
"def _load_norm(self, bin_width):\n norm_dir = '/home/mike/research/ac6_microburst_scale_sizes/data/norm'\n norm_name = 'equatorial_norm.csv'\n norm_path = os.path.join(norm_dir, norm_name)\n self.norm = pd.read_csv(norm_path, index_col=0)\n sep_min = self.norm.index.min()\n sep_max = self.norm.index.max()\n\n if self.norm.index[1] - self.norm.index[0] != bin_width:\n # Now rebin by the bin sizes.\n self.norm = self.norm.groupby(self.norm.index//bin_width).sum()\n # Replace the consecutive indicies with [0, bin_width, 2*bin_width...] \n self.norm = self.norm.set_index(\n np.arange(sep_min, sep_max+1, bin_width))\n return",
"def read_file(file, fN):\n\twith open(file, 'rb') as q:\n\t\treader = csv.reader(q,delimiter=\",\",quotechar = \"\\\"\")\n\t\tfor row in reader:\n\t\t\tcopy = list(row)\n\t\t\ti = 0\n\t\t\tfor cell in copy:\n\t\t\t\tif cell.find('<') != -1:\n\t\t\t\t\tx = float(cell[1:])/2\n\t\t\t\t\tcopy[i] = x\n\t\t\t\ti = i + 1\n\t\t\twrite_row(copy, fN)",
"def sparse_multiyear_histogram(years, csv_template, bahistfile, \r\n count_threshold=50, bins=25, out_template=None) :\r\n # open the ba histogram file\r\n bahist = nc.Dataset(bahistfile)\r\n counts = bahist.variables['burned_total']\r\n \r\n # read all csv files and concatenate\r\n file_list = []\r\n for y in years : \r\n file_list.append(pd.read_csv(csv_template % y))\r\n compare = pd.concat(file_list)\r\n compare = compare[ np.logical_and(compare.icol(0)>=10,compare.icol(0)<364) ] \r\n \r\n # get min/max/bin from multiyear histogram file\r\n mmb, binsizes = read_multiyear_minmax(bahist,counts.dimensions)\r\n \r\n # create an indexer\r\n index = ah.init_indexers(mmb) \r\n \r\n # strip out geometry\r\n dim_bins = [m[2] for m in mmb] \r\n \r\n # create sparse histograms\r\n shisto_forest = ah.SparseKeyedHistogram(minmax=mmb, threshold=count_threshold,\r\n bins=bins, default_minmax=(1,count_threshold,count_threshold-1))\r\n shisto_not_forest = ah.SparseKeyedHistogram(minmax=mmb, threshold=count_threshold,\r\n bins=bins, default_minmax=(1,count_threshold,count_threshold-1))\r\n shisto_total = ah.SparseKeyedHistogram(minmax=mmb, threshold=count_threshold,\r\n bins=bins, default_minmax=(1,count_threshold,count_threshold-1))\r\n\r\n \r\n\r\n # loop through all bins with nonzero data\r\n i_nonzero = np.where( counts[:]>0 )\r\n for i_bin in zip(*i_nonzero) : \r\n total = select_data(compare, counts.dimensions, i_bin, index, dim_bins)\r\n forest = total[ total.ix[:,1].isin(FOREST_LC) ]\r\n not_forest = total [ total.ix[:,1].isin(NONFOREST_LC) ]\r\n\r\n shisto_forest.put_combo(i_bin, forest['BA Count'], units=False)\r\n shisto_not_forest.put_combo(i_bin, not_forest[\"BA Count\"], units=False)\r\n shisto_total.put_combo(i_bin, total['BA Count'], units=False)\r\n \r\n # save file if filename template specified\r\n if out_template is not None : \r\n ah.save_sparse_histos(shisto_total, out_template%'total')\r\n ah.save_sparse_histos(shisto_forest, out_template%'forest')\r\n ah.save_sparse_histos(shisto_not_forest, out_template%'not_forest')\r\n \r\n bahist.close()\r\n \r\n return (shisto_total, shisto_forest, shisto_not_forest)",
"def ReadBinnedSAData(DataDirectory, fname_prefix):\n # get the csv filename\n fname_suffix = \"_SAbinned.csv\"\n fname = fname_prefix+fname_suffix\n df = pd.read_csv(DataDirectory+fname)\n\n return df",
"def bin_filter_by_blacklist(bin_count, blacklistfile, \\\n binsize = 10000, ext = 0, set_value = 2):\n bin_count_ = bin_count.copy()\n\n # convert bed file to gr\n gr = GRange(blacklistfile, 'bedfile')\n\n # init blacklist bins\n bl_bin = defaultdict(lambda: False)\n\n # save blacklist bins to dict\n for _gr in gr.gr:\n _start = math.floor(_gr[1].start/binsize) - ext\n _end = math.floor(_gr[1].stop/binsize) + 1 + ext\n for i in range(_start, _end):\n bl_bin[(_gr[0], i*binsize)] = True\n\n # get the bool vector of blacklist bins\n tf = [bl_bin[row[1:3]] for row in bin_count_.itertuples()]\n\n # set blacklist bins to 0\n # bin_count_.loc[tf, 'Count'] = set_value\n # bin_count_['CN'] = 2*bin_count_['Count']/np.mean(bin_count_['Count'])\n\n # set blacklist bins CN to 2, left count unchanged\n bin_count_.loc[tf, 'CN'] = set_value\n\n return bin_count_",
"def readTruBlu(csvfile):\n sep = ','\n header = 0\n skiprows = 16 #this is somewhat weak, number of lines could change over time??\n\t# Definitely weak. Probably an automated read to csv header would be better\n index_col = 3\n #names = ['ID','Name','Address','Time of Acquisition','Elapsed(Sec)','Level(PSI)','Temperature (\\'C)','Battery Voltage(Volt)','Supply Voltage(Volt)','Scan No','blank']\n parse_dates = True\n #skip_footer = 1\n #print(csvfile)\n #df = read_csv(csvfile, sep=sep, names=names, skiprows=skiprows, index_col=index_col, parse_dates=parse_dates)\n \n try:\n if os.stat(csvfile).st_size > 0:\n df = read_csv(csvfile, sep=sep, skiprows=skiprows, header=header, index_col=index_col, parse_dates=parse_dates)\n return df\n else:\n print((csvfile + \" is empty\"))\n except OSError:\n print((csvfile + \" does not exist\"))",
"def csvparser(cfile):\r\n\r\n csvreader = csv.reader(cfile, delimiter = ',')\r\n tile = []\r\n count = 0\r\n for row in csvreader:\r\n if count == 0:\r\n count += 1\r\n continue\r\n else:\r\n tile.append(row[1:])\r\n return tile",
"def read_in_interaction_file(filename, number_of_bins):\n distance = np.zeros((number_of_bins+1))\n potential = np.zeros((number_of_bins+1)) \n derivative = np.zeros((number_of_bins+1)) \n print \"OPENING FILE %s\" % (filename) \n LIST_IN = open(filename, 'r') \n \n element_count = 0 # element_count is a surrogate for the numbering of the\n # elements (int(NewRow[0])). \n # The original numbering may be inconsistent in the source file. We do not\n # want that to\n # propagate to new files...\n \n for line in LIST_IN:\n if line[0:1] != \"#\":\n NewRow = (line.strip()).split() \n if len(NewRow)>3:\n element_count += 1\n if abs(float(NewRow[3])) > 0:\n distance[element_count] = float(NewRow[1])\n potential[element_count] = float(NewRow[2])\n derivative[element_count] = float(NewRow[3]) \n return distance, potential, derivative"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Validate FHIR conformance resources and validate example FHIR resources against the conformance resources by running the HL7 FHIR implementation guide publisher.
|
def validate(ig_control_filepath, clear_output, publisher_opts):
try:
app.validate(ig_control_filepath, clear_output, publisher_opts)
except Exception as e:
logger.exception(str(e))
logger.info('❌ Validation failed!')
exit(1)
else:
logger.info('✅ Validation succeeded!')
|
[
"def test_validate_schema_2(self):\n\n # prepare\n validator = EsdlValidator()\n\n # execute, validate against 1 schema\n result = validator.validate(self.esdlHybrid, [self.schemaTwo])\n validationProducer = result.schemas[0].validations[0]\n validationStorage = result.schemas[0].validations[1]\n validationGasHeater = result.schemas[0].validations[2]\n validationHeatpump = result.schemas[0].validations[3]\n validationCostsInRange = result.schemas[0].validations[4]\n\n # assert\n self.assertEqual(validationProducer.checked, 3, \"there should be 3 checked since there are only 3 producers\")\n self.assertEqual(len(validationProducer.errors), 2, \"there should be 2 errors since 1 producer validates ok\")\n self.assertEqual(validationProducer.errors[0], \"Consumer missing power and marginal costs or no energy profile connected: property port.profile value is None\", \"Warning should say: Consumer missing power and marginal costs or no energy profile connected: property port.profile value is None\")\n\n self.assertEqual(validationStorage.checked, 1, \"there should be 1 checked storage\")\n self.assertEqual(len(validationStorage.errors), 0, \"there should be 0 errors, storage should be correct\")\n\n self.assertEqual(validationGasHeater.checked, 1, \"there should be 1 checked GasHeater\")\n self.assertEqual(len(validationGasHeater.warnings), 0, \"there should be 0 warnings, gasheater should be correct\")\n\n self.assertEqual(validationHeatpump.checked, 1, \"there should be 1 checked HeatPump\")\n self.assertEqual(len(validationHeatpump.warnings), 1, \"there should be 1 warnings, heatpump should be missing a control strategy\")\n\n self.assertEqual(validationCostsInRange.checked, 3, \"there should be 3 checked costs\")\n self.assertEqual(len(validationCostsInRange.warnings), 1, \"there should be 1 warnings\")",
"def test_vmware_service_resources_validate_subscription_post(self):\n pass",
"def main(source):\n if source is None:\n click.echo(\n \"You need to supply a file or url to a schema to a swagger schema, for\"\n \"the validator to work.\"\n )\n return 1\n try:\n load(source)\n click.echo(\"Validation passed\")\n return 0\n except ValidationError as e:\n raise click.ClickException(str(e))",
"def test_validate_multiple_schemas(self):\n\n # prepare\n validator = EsdlValidator()\n\n # execute, validate against 2 schemas\n result = validator.validate(self.esdlHybrid, [self.schemaOne, self.schemaTwo])\n\n # assert\n self.assertEqual(len(result.schemas), 2, \"there should be 2 schemas in the result\")\n self.assertEqual(result.valid, False, \"There should be errors in the schema's, valid should be false\")\n self.assertEqual(result.errorCount, 2, \"There should be a total of 2 errors\")\n self.assertEqual(result.warningCount, 3, \"There should be 3 warnings in total\")",
"def xsd_validate(self):\n if not self.tree:\n self.xml_validate()\n\n self.valid8r = Validator(self.content, self.includes, self.imports)",
"def ExampleValidator(\n statistics_path: InputPath('ExampleStatistics'),\n schema_path: InputPath('Schema'),\n\n anomalies_path: OutputPath('ExampleAnomalies'),\n):\n from tfx.components.example_validator.component import ExampleValidator as component_class\n\n #Generated code\n import json\n import os\n import tensorflow\n from google.protobuf import json_format, message\n from tfx.types import Artifact, channel_utils, artifact_utils\n\n arguments = locals().copy()\n\n component_class_args = {}\n\n for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():\n argument_value_obj = argument_value = arguments.get(name, None)\n if argument_value is None:\n continue\n parameter_type = execution_parameter.type\n if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple\n argument_value_obj = parameter_type()\n json_format.Parse(argument_value, argument_value_obj)\n component_class_args[name] = argument_value_obj\n\n for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():\n artifact_path = arguments[name + '_path']\n if artifact_path:\n artifact = channel_parameter.type()\n artifact.uri = artifact_path + '/' # ?\n if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:\n # Recovering splits\n subdirs = tensorflow.io.gfile.listdir(artifact_path)\n artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))\n component_class_args[name] = channel_utils.as_channel([artifact])\n\n component_class_instance = component_class(**component_class_args)\n\n input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}\n output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}\n exec_properties = component_class_instance.exec_properties\n\n # Generating paths for output artifacts\n for name, artifacts in output_dict.items():\n base_artifact_path = arguments[name + '_path']\n # Are there still cases where output channel has multiple artifacts?\n for idx, artifact in enumerate(artifacts):\n subdir = str(idx + 1) if idx > 0 else ''\n artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'\n\n print('component instance: ' + str(component_class_instance))\n\n #executor = component_class.EXECUTOR_SPEC.executor_class() # Same\n executor = component_class_instance.executor_spec.executor_class()\n executor.Do(\n input_dict=input_dict,\n output_dict=output_dict,\n exec_properties=exec_properties,\n )",
"def validate(self):\n\n\t\t# Run the provider validation\n\t\tself.modules['provider'].validate_manifest(self.data, self.schema_validator, self.validation_error)\n\t\t# Run the validation function for any plugin that has it\n\t\tfor plugin in self.modules['plugins']:\n\t\t\tvalidate = getattr(plugin, 'validate_manifest', None)\n\t\t\tif callable(validate):\n\t\t\t\tvalidate(self.data, self.schema_validator, self.validation_error)",
"def test_questions_yaml_schema(categories):\n snapshot_schema = {\n \"type\": \"dict\",\n \"schema\": {\n \"name\": {\"type\": \"string\", \"required\": True},\n \"path\": {\"type\": \"string\", \"required\": True},\n },\n }\n parameter_schema = {\n \"type\": \"dict\",\n \"schema\": {\n \"name\": {\"type\": \"string\", \"required\": True},\n \"value\": {\"type\": [\"string\", \"integer\"], \"required\": True},\n },\n }\n schema = {\n \"categories\": { # list of question categories\n \"type\": \"list\",\n \"schema\": {\n \"type\": \"dict\",\n \"schema\": {\n \"name\": {\"type\": \"string\", \"required\": True},\n \"description\": {\"type\": \"string\"}, # human-readable category name\n \"introduction\": {\n \"type\": \"string\"\n }, # what the question category does\n \"questions\": {\n \"type\": \"list\",\n \"schema\": {\n \"type\": \"dict\",\n \"schema\": {\n # human-readable question name\n \"name\": {\"type\": \"string\", \"required\": True},\n # code/pybatfish question name\n \"pybf_name\": {\"type\": \"string\", \"required\": True},\n \"type\": {\n \"type\": \"string\", # indicates what code to generate for result inspection\n \"allowed\": [\n \"basic\",\n \"singleflow\",\n \"dualflow\",\n \"no-result\",\n \"diff\",\n ],\n },\n \"snapshot\": snapshot_schema, # snapshot to execute on, if not default\n \"reference_snapshot\": snapshot_schema, # reference_snapshot for \"diff\" questions\n \"parameters\": {\n \"type\": \"list\",\n \"schema\": parameter_schema,\n },\n },\n },\n },\n },\n },\n }\n }\n v = cerberus.Validator(schema)\n\n if not v.validate(categories):\n raise AssertionError(v.errors)",
"def validate(self):\n logger.info('Starting validation of transmittal %s' % self.basename)\n\n self._errors = dict()\n self._validate_transmittal()\n self._validate_csv_content()\n\n # We need a valid transmittals to check revision numbers\n if not self._errors:\n self._validate_revisions()",
"def console_validate(\n # Source\n source: List[str] = common.source,\n name: str = common.resource_name,\n type: str = common.type,\n path: str = common.path,\n scheme: str = common.scheme,\n format: str = common.format,\n encoding: str = common.encoding,\n innerpath: str = common.innerpath,\n compression: str = common.compression,\n schema: str = common.schema,\n hash: str = common.hash,\n bytes: int = common.bytes,\n fields: int = common.fields,\n rows: int = common.rows,\n basepath: str = common.basepath,\n # Dialect\n dialect: str = common.dialect,\n header_rows: str = common.header_rows,\n header_join: str = common.header_join,\n comment_char: str = common.comment_char,\n comment_rows: str = common.comment_rows,\n sheet: str = common.sheet,\n table: str = common.table,\n keys: str = common.keys,\n keyed: bool = common.keyed,\n # Detector\n buffer_size: int = common.buffer_size,\n sample_size: int = common.sample_size,\n field_type: str = common.field_type,\n field_names: str = common.field_names,\n field_confidence: float = common.field_confidence,\n field_float_numbers: bool = common.field_float_numbers,\n field_missing_values: str = common.field_missing_values,\n schema_sync: bool = common.schema_sync,\n # Checklist\n checklist: str = common.checklist,\n checks: str = common.checks,\n pick_errors: str = common.pick_errors,\n skip_errors: str = common.skip_errors,\n # Command\n parallel: bool = common.parallel,\n limit_rows: int = common.limit_rows,\n limit_errors: int = common.limit_errors,\n yaml: bool = common.yaml,\n json: bool = common.json,\n debug: bool = common.debug,\n trusted: bool = common.trusted,\n standards: str = common.standards,\n # Deprecated\n resource_name: str = common.resource_name,\n):\n console = Console()\n name = name or resource_name\n\n # Setup system\n if trusted:\n system.trusted = trusted\n if standards:\n system.standards = standards # type: ignore\n\n # Create source\n source = helpers.create_source(source, path=path)\n if not source and not path:\n note = 'Providing \"source\" or \"path\" is required'\n helpers.print_error(console, note=note)\n raise typer.Exit(code=1)\n\n try:\n # Create dialect\n dialect_obj = helpers.create_dialect(\n descriptor=dialect,\n header_rows=header_rows,\n header_join=header_join,\n comment_char=comment_char,\n comment_rows=comment_rows,\n sheet=sheet,\n table=table,\n keys=keys,\n keyed=keyed,\n )\n\n # Create detector\n detector_obj = helpers.create_detector(\n buffer_size=buffer_size,\n sample_size=sample_size,\n field_type=field_type,\n field_names=field_names,\n field_confidence=field_confidence,\n field_float_numbers=field_float_numbers,\n field_missing_values=field_missing_values,\n schema_sync=schema_sync,\n )\n\n # Create checklist\n checklist_obj = helpers.create_checklist(\n descriptor=checklist,\n checks=checks,\n pick_errors=pick_errors,\n skip_errors=skip_errors,\n )\n\n # Create resource\n resource = Resource(\n source=helpers.create_source(source),\n name=name,\n path=path,\n scheme=scheme,\n format=format,\n datatype=type,\n compression=compression,\n innerpath=innerpath,\n encoding=encoding,\n hash=hash,\n bytes=bytes,\n fields=fields,\n rows=rows,\n schema=schema,\n basepath=basepath,\n detector=detector_obj,\n )\n\n # Add dialect\n if dialect_obj:\n resource.dialect = dialect_obj\n\n # Validate resource\n report = resource.validate(\n checklist_obj,\n name=name,\n parallel=parallel,\n limit_rows=limit_rows,\n limit_errors=limit_errors,\n )\n code = int(not report.valid)\n except Exception as 
exception:\n helpers.print_exception(console, debug=debug, exception=exception)\n raise typer.Exit(code=1)\n\n # Yaml mode\n if yaml:\n content = report.to_yaml().strip()\n print(content)\n raise typer.Exit(code=code)\n\n # Json mode\n if json:\n content = report.to_json()\n print(content)\n raise typer.Exit(code=code)\n\n # Default mode\n labels = [\"Row\", \"Field\", \"Type\", \"Message\"]\n props = [\"row_number\", \"field_number\", \"type\", \"message\"]\n names = [\"dataset\"] + [task.name for task in report.tasks]\n matrix = [report.errors] + [task.errors for task in report.tasks]\n\n # Status\n if report.tasks:\n console.rule(\"[bold]Dataset\")\n view = Table(title=\"dataset\")\n view.add_column(\"name\")\n view.add_column(\"type\")\n view.add_column(\"path\")\n view.add_column(\"status\")\n for task in report.tasks:\n status = \"VALID\" if task.valid else \"INVALID\"\n style = \"green\" if task.valid else \"bold red\"\n status_row = [task.name, task.type, task.place, status]\n view.add_row(*status_row, style=style)\n console.print(view)\n\n # Errors\n if not report.valid:\n console.rule(\"[bold]Tables\")\n for name, errors in zip(names, matrix):\n if errors:\n view = Table(title=name)\n for label in labels:\n view.add_column(label)\n for error in errors:\n error_row: List[str] = []\n for prop in props:\n error_row.append(str(getattr(error, prop, None)))\n view.add_row(*error_row)\n console.print(view)\n\n # Proper retcode\n raise typer.Exit(code=code)",
"def test_conformance(self):\n self._request_valid(\"conformance\")",
"def perform_validation(subscription):\n logger.info(f'Performing subscription validation for: {subscription[\"subscriptionName\"]}')\n check_missing_data(subscription)\n logger.info(f'No missing data found for: {subscription[\"subscriptionName\"]}')\n check_duplicate_fields(subscription[\"subscriptionName\"])\n logger.info(f'No duplicate data found for: {subscription[\"subscriptionName\"]}')\n validate_nf_filter(subscription[\"nfFilter\"])\n logger.info(f'Filter data is valid for: {subscription[\"subscriptionName\"]}')",
"def validate_suite(io_specifications):\n for spec_num in range(len(io_specifications)):\n BNCValidator.validate_spec(spec_num, io_specifications)",
"def _validate_plan(self) -> None:\n # Check for repetition of metrics and validators\n metric_names = [m.metric_name for m in self.metrics]\n validator_names = [v.validator_name for v in self.validators]\n composite_metric_names = [cm.composite_metric_name for cm in self.composite_metrics]\n\n if len(set(metric_names)) != len(metric_names):\n raise RuntimeError(\"You cannot have repeated metric names.\")\n\n if len(set(validator_names)) != len(validator_names):\n raise RuntimeError(\"You cannot have repeated validator names.\")\n\n if len(set(composite_metric_names)) != len(composite_metric_names):\n raise RuntimeError(\"You cannot have repeated composite metric names.\")\n\n # Check if we have all validators specified in the intervention list\n for vname in self.intervention_validators:\n if vname not in validator_names:\n raise RuntimeError(f\"Validator '{vname}' not found in validators list.\")\n\n # Check for consistency of the validators, if we have all required\n # metrics especified.\n metric_calculators = self.metrics_dict()\n for validator in self.validators:\n for metric_requirement in validator.requires_metric:\n if metric_requirement not in metric_calculators:\n raise RuntimeError(f\"Metric '{metric_requirement}' required \"\n f\"by validator '{validator.validator_name}'.\")\n\n # Check for consistency of the composite metrics, if we have all required\n # metrics and validators specified.\n validators_specified = self.validators_dict()\n for cm in self.composite_metrics:\n # Check for metric requirements\n for metric_requirement in cm.requires_metric:\n if metric_requirement not in metric_calculators:\n raise RuntimeError(f\"Metric '{metric_requirement}' required \"\n f\"by composite metric '{cm.composite_metric_name}'.\")\n # Check for validator requirements\n for validator_requirement in cm.requires_validator:\n if validator_requirement not in validators_specified:\n raise RuntimeError(f\"Validator '{validator_requirement}' required \"\n f\"by composite metric '{cm.composite_metric_name}'.\")",
"def test_validates(self):\n doc = InstructionDocument(creator_id=self.cid,\n name='name',instruction={\"load\":\"google.com\"})\n doc.validate()",
"def test_all():\n assert not hug.validate.all(\n hug.validate.contains_one_of(\"first\", \"year\"), hug.validate.contains_one_of(\"last\", \"place\")\n )(TEST_SCHEMA)\n assert hug.validate.all(\n hug.validate.contains_one_of(\"last\", \"year\"), hug.validate.contains_one_of(\"first\", \"place\")\n )(TEST_SCHEMA)",
"def validate(self):\n if self.guideStars is None:\n raise RuntimeError('The GuideStars instance cannot be None. '\n 'If no valid instance can be provided, '\n 'pass and empty instance, eg., GuideStars.empty().')\n\n if (self.variant != 0) ^ (self.designId0 != 0):\n raise RuntimeError(\"if either variant or designId0 is set both must be set.\")\n\n if len(set([len(getattr(self, nn)) for nn in self._keywords])) != 1:\n raise RuntimeError(\"Inconsistent lengths: %s\" % ({nn: len(getattr(self, nn)) for\n nn in self._keywords}))\n for ii, tt in enumerate(self.targetType):\n try:\n TargetType(tt)\n except ValueError as exc:\n raise ValueError(\"targetType[%d] = %d is not a recognized TargetType\" % (ii, tt)) from exc\n for ii, tt in enumerate(self.fiberStatus):\n try:\n FiberStatus(tt)\n except ValueError as exc:\n raise ValueError(\"fiberStatus[%d] = %d is not a recognised FiberStatus\" % (ii, tt)) from exc\n\n for ii, (mag, names) in enumerate(zip(self.fiberFlux, self.filterNames)):\n if len(mag) != len(names):\n raise RuntimeError(\"Inconsistent lengths between fiberFlux (%d) and filterNames (%d) \"\n \"for fiberId=%d\" % (len(mag), len(names), self.fiberId[ii]))\n for ii, (pFlux, names) in enumerate(zip(self.psfFlux, self.filterNames)):\n if len(pFlux) != len(names):\n raise RuntimeError(\"Inconsistent lengths between psfFlux (%d) and filterNames (%d) \"\n \"for fiberId=%d\" % (len(pFlux), len(names), self.fiberId[ii]))\n for ii, (tFlux, names) in enumerate(zip(self.totalFlux, self.filterNames)):\n if len(tFlux) != len(names):\n raise RuntimeError(\"Inconsistent lengths between totalFlux (%d) and filterNames (%d) \"\n \"for fiberId=%d\" % (len(tFlux), len(names), self.fiberId[ii]))\n for ii, (ffErr, names) in enumerate(zip(self.fiberFluxErr, self.filterNames)):\n if len(ffErr) != len(names):\n raise RuntimeError(\"Inconsistent lengths between fiberFluxErr (%d) and filterNames (%d) \"\n \"for fiberId=%d\" % (len(ffErr), len(names), self.fiberId[ii]))\n for ii, (pfErr, names) in enumerate(zip(self.psfFluxErr, self.filterNames)):\n if len(pfErr) != len(names):\n raise RuntimeError(\"Inconsistent lengths between psfFluxErr (%d) and filterNames (%d) \"\n \"for fiberId=%d\" % (len(pfErr), len(names), self.fiberId[ii]))\n for ii, (tfErr, names) in enumerate(zip(self.totalFluxErr, self.filterNames)):\n if len(tfErr) != len(names):\n raise RuntimeError(\"Inconsistent lengths between totalFluxErr (%d) and filterNames (%d) \"\n \"for fiberId=%d\" % (len(tfErr), len(names), self.fiberId[ii]))\n for nn in self._pointFields:\n matrix = getattr(self, nn)\n if matrix.shape != (len(self.fiberId), 2):\n raise RuntimeError(\"Wrong shape for %s: %s vs (%d,2)\" % (nn, matrix.shape, len(self.fiberId)))\n\n # Check for duplicates of catId, objId combinations.\n counts = Counter(zip(self.catId, self.objId))\n counts.pop((-1, -1), None) # ignore untargetted fibers\n if counts and counts.most_common(1)[0][1] > 1:\n duplicates = {tup: count for tup, count in counts.items() if count > 1}\n raise ValueError(f'design {self.pfsDesignId:#016x} contains duplicate occurrences of'\n ' the same (catId, objId) combination. Details below:\\n'\n f'{{(catId, objId): number of occurrences}}:\\n\\t {duplicates}')",
"def run():\n openapi_schema = yaml.load(openapi_schema_pattern)\n schemas = openapi_schema[\"components\"][\"schemas\"]\n\n with open(crd_list, 'r') as crd_list_file: # read file with CRD locations\n crd_list_data = yaml.load(crd_list_file)\n\n with open(crd_list_data['crdList'], 'r') as yaml_file:\n crd_data = yaml.load_all(yaml_file) # read CRDs\n for crd in crd_data:\n try:\n if check_yaml_kind(crd):\n process_crd(crd, schemas, crd_list_data[\"schemasLocation\"], getenv(rewrite_env) is not None)\n except Exception as exc:\n error(\"An error occurred while processing CRD data from phase rendered docs\\n{}\".format(exc))\n\n # Validate output V3 spec\n try:\n validate_v3_spec(openapi_schema)\n info(\"Validation of OpenAPIV3Schemas is successful\")\n except OpenAPIValidationError as exc:\n error(\"An error occurred while validating OpenAPIV3Schema\")\n raise exc\n\n # Rewrite openAPI schema file\n with open(openapi_schema_path, 'w') as openapi_schema_file:\n info(\"Saving OpenAPIV3Schemas\")\n yaml.dump(openapi_schema, openapi_schema_file)\n\n # run openapi2jsonschema conversion\n command.default()",
"def build(self):\n if not self._package.resources:\n raise ValidationError(\"At least one data resource is required.\")\n\n resource = self._package.resources[0]\n if 'schema' not in resource.descriptor:\n raise ValidationError(\"The 'schema' object is missing in resource\")\n if 'questions' not in resource.descriptor['schema']:\n raise ValidationError(\n \"The 'questions' object is missing from schema\")\n\n questions = resource.descriptor['schema']['questions']\n if isinstance(questions, dict):\n question_keys = list(questions.keys())\n question_keys.sort()\n for name in question_keys:\n xform_from_floip_dict(self._survey, name, questions[name])\n elif isinstance(questions, list):\n for question in questions:\n for name in question:\n xform_from_floip_dict(self._survey, name, question[name])\n else:\n raise ValidationError(\n \"Expecting 'questions' to be an object or array\")\n\n meta_dict = {\n \"name\": \"meta\",\n \"type\": \"group\",\n \"control\": {\n \"bodyless\": True\n },\n \"children\": [{\n \"name\": \"instanceID\",\n \"type\": \"calculate\",\n \"bind\": {\n \"calculate\": \"concat('uuid:', uuid())\"\n }\n }, {\n \"name\": \"contactID\",\n \"type\": \"string\",\n }, {\n \"name\": \"sessionID\",\n \"type\": \"string\",\n }]\n } # yapf: disable\n self._survey.add_child(create_survey_element_from_dict(meta_dict))\n self._survey.validate()\n\n # check that we can recreate the survey object from the survey JSON\n create_survey_element_from_dict(self._survey.to_json_dict())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convenience method to add the necessary configuration for the resource(s) to the IG configuration so that the resource is included in the generated IG site. NOTE: The resource file, `data_path`, must already be in the IG site root; this CLI command does not move the file into the site root. \b
|
def add(data_path, ig_control_filepath):
try:
app.update_ig_config(data_path, ig_control_filepath)
except Exception as e:
logger.exception(str(e))
logger.info(f'❌ Add {data_path} to IG failed!')
exit(1)
else:
logger.info(f'✅ Add {data_path} to IG succeeded!')
|
[
"def add_resources(resources_dir, site_config, referable_test=is_common_media_file):\n item_config = site_config.get(\"item_config\")\n for file in Path(resources_dir).glob(\"**/*\"):\n if file.is_file():\n if referable_test(file):\n id = file.name\n else:\n id = \"_%d\" % len(item_config)\n\n if id in item_config:\n raise ValueError(\"An item with the same ID already exists: \" + id)\n\n endpoint = str(file)[len(resources_dir) :].replace(\"\\\\\", \"/\")\n\n item_config[id] = {\n \"endpoint\": endpoint,\n \"source\": str(file),\n }",
"def add_resources_file():\n # Validate the request body contains JSON\n if not request.is_json:\n return jsonify(\"Error\", \"No json data received\"), 400\n # Parse the JSON into a Python dictionary\n req = request.get_json(silent=True)\n return update_json_file(data=req,\n schema_name=\"set_resources\",\n target_file_path=\"/data/jsons/resources.json\",\n current_user_id=current_user.get_id(),\n full_file=False)",
"def configure_plugin_file(self, data):\n # Add source.\n logger.info(\"Configure plugin file, data %s\" %json.dumps(data))\n source_tag = str()\n\n #default conf\n default_conf = data.get('default_conf', {})\n if default_conf:\n memory_buffer_limit = default_conf.get('Mem_Buf_Limit','')\n refresh_Interval = default_conf.get('Refresh_Interval','')\n else:\n memory_buffer_limit = MEMORY_BUFFER_LIMIT\n refresh_Interval = REFRESH_INTERVAL\n ui_config = data.get('config', {})\n logpath = ''\n logfilter = ''\n ignorelines = ''\n if ui_config:\n logpath = ui_config.get('log_paths','')\n logfilter = ui_config.get('filters',{}).get('level',[])\n ignorelines = ui_config.get('ignore_older_than','')\n\n if ignorelines:\n if not(ignorelines[:-1].isdigit() and (ignorelines[-1:] == 'm' or ignorelines[-1:] == 'h' or ignorelines[-1:] == 'd')):\n ignorelines = ''\n\n if not logfilter:\n logfilter = ['error']\n \n lines = ['[INPUT]']\n for key, val in data.get('input', {}).iteritems():\n if logpath and key == 'Path':\n val = logpath\n lines.append(' ' + str(key) + ' ' + str(val))\n lines.append(' ' + 'Refresh_Interval' + ' ' + str(refresh_Interval))\n lines.append(' ' + 'Mem_Buf_Limit' + ' ' + str(memory_buffer_limit))\n lines.append(' ' + 'Tag' + ' ' + str(data.get('name','')))\n lines.append(' ' + 'Path_Key' + ' ' + 'file')\n lines.append(' ' + 'Skip_Long_Lines' + ' ' + 'On')\n\n if ignorelines:\n lines.append(' ' + 'Ignore_Older' + ' ' + str(ignorelines))\n lines.append('')\n\n for filter in data.get('filters', []):\n lines.append('[FILTER]')\n lines.append(' ' + 'Match' + ' ' + str(data.get('name','')))\n for key, val in filter.iteritems():\n lines.append(' ' + str(key) + ' ' + str(val))\n lines.append('')\n #check tags\n if self.tags or data.get('transform', {}):\n lines.append('[FILTER]')\n lines.append(' ' + 'Name' + ' record_modifier')\n lines.append(' ' + 'Match' + ' ' + str(data.get('name','')))\n if self.tags:\n for tag_key, tag_val in self.tags.items():\n lines.append(' ' + 'Record'+ ' ' +'_tag_' + str(tag_key) + ' ' + str(tag_val))\n if data.get('transform', {}):\n for key, val in data.get('transform', {}).iteritems():\n lines.append(' ' + 'Record'+ ' ' + str(key) + ' ' + str(val))\n lines.append('')\n\n \n if logfilter:\n loglevels = ''\n for logf in logfilter:\n loglevels+=str(logf)+','\n lines.append('[FILTER]')\n lines.append(' ' + 'Name' + ' record_modifier')\n lines.append(' ' + 'Match' + ' ' + str(data.get('name','')))\n lines.append(' ' + 'Record'+ ' ' + 'log_filters' + ' ' + str(loglevels[:-1]))\n lines.append('')\n\n #adding timestamp\n lines.append('[FILTER]')\n lines.append(' ' + 'Name' + ' lua')\n lines.append(' ' + 'Match' + ' ' + str(data.get('name','')))\n lines.append(' ' + 'script' + ' ' + LUA_SCRIPTFILE)\n lines.append(' ' + 'call' + ' ' + 'addtime_millisecond')\n lines.append('')\n\n #adding timestamp\n \n if data.get('functions', []):\n for fun in data.get('functions', []):\n lines.append('[FILTER]')\n lines.append(' ' + 'Name' + ' lua')\n lines.append(' ' + 'Match' + ' ' + str(data.get('name','')))\n lines.append(' ' + 'script' + ' ' + LUA_SCRIPTFILE)\n lines.append(' ' + 'call' + ' ' + str(fun))\n lines.append('')\n\n #adding logfilter\n if logfilter:\n lines.append('[FILTER]')\n lines.append(' ' + 'Name' + ' lua')\n lines.append(' ' + 'Match' + ' ' + str(data.get('name','')))\n lines.append(' ' + 'script' + ' ' + LUA_SCRIPTFILE)\n lines.append(' ' + 'call' + ' ' + 'filter_log')\n lines.append('')\n\n if logfilter:\n lines.append('[FILTER]')\n lines.append(' ' + 'Name' + 
' record_modifier')\n lines.append(' ' + 'Match' + ' ' + str(data.get('name','')))\n lines.append(' ' + 'Remove_key'+ ' ' + 'log_filters')\n lines.append(' ' + 'Remove_key'+ ' ' + '@timestamp')\n lines.append('')\n\n for x_targets in self.targets:\n if STATUS not in x_targets:\n if x_targets.get('store_type') and data.get('collection_type','') == x_targets.get('store_type'):\n lines.append('[OUTPUT]')\n lines.append(' ' + 'Name' + ' es')\n lines.append(' ' + 'Match' + ' ' + str(data.get('name','')))\n lines.append(' ' + 'Buffer_Size' + ' ' + '2MB')\n if x_targets.get('host',''):\n lines.append(' ' + 'Host' + ' ' + str(x_targets.get('host','')))\n if x_targets.get('port',''):\n lines.append(' ' + 'Port' + ' ' + str(x_targets.get('port','')))\n if x_targets.get('index',''):\n lines.append(' ' + 'Index' + ' ' + str(x_targets.get('index','')) + '_write')\n if x_targets.get('username',''):\n lines.append(' ' + 'HTTP_User' + ' ' + str(x_targets.get('username','')))\n if x_targets.get('password',''):\n password = x_targets.get('password','')\n lines.append(' ' + 'HTTP_Passwd' + ' ' + str(password))\n if x_targets.get('protocol','') and x_targets.get('protocol','') == 'https':\n lines.append(' ' + 'tls' + ' On')\n lines.append(' ' + 'tls.verify' + ' Off')\n lines.append(' ' + 'Type' + ' ' + DOCUMENT)\n #if plugin_name != \"linux-syslog\":\n # lines.append(' ' + 'Time_Key' + ' ' + 'time')\n lines.append('')\n\n filename = self.plugin_path + os.path.sep + data.get('name') +'.conf'\n self.plugin_post_data.append((filename, '\\n'.join(lines)))\n return True",
"def generate_config(cls, path: str =None) -> None:\n if path is None:\n path = \"config.yaml\"\n src_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"resources\", \"config.yaml\")\n shutil.copy(src_path, path)",
"def set_resources_file():\n # Validate the request body contains JSON\n if not request.is_json:\n return jsonify(\"Error\", \"No json data received\"), 400\n # Parse the JSON into a Python dictionary\n req = request.get_json(silent=True)\n return update_json_file(data=req,\n schema_name=\"set_resources\",\n target_file_path=\"/data/jsons/resources.json\",\n current_user_id=current_user.get_id(),\n full_file=True)",
"def init(deployment_dir, force):\n template = os.path.join(HERE, \"template\", \"database_config_template.yml\")\n destination = os.path.join(deployment_dir, \"config.yml\")\n if os.path.isfile(destination) and not force:\n click.echo(\"Config file already exists. Specify --force to overwrite it.\")\n return\n shutil.copy(template, destination)\n click.echo(\"Created {}\".format(destination))\n click.echo(\"You still need to modify it to fill correct parameters.\")",
"def set_resource_paths(cls, resource_paths):\n pyglet.resource.path = resource_paths\n pyglet.resource.reindex() # Refresh the path index",
"def init_data():\n data_info = load_data_config_file()\n reports, institutions = data_info[\"reports\"], data_info[\"institutions\"].keys()\n csv_file_info = get_preprocess_data(reports, institutions, mode='w')\n return update_data_config_file(csv_file_info)",
"def _conf_addloc(app, args):\n conf_fof = get_conf_locations_fof(app.name)\n confloc = load_conf_locations(conf_fof)\n confloc[args.name] = args.location\n save_conf_locations(conf_fof, confloc)",
"def AddConfigFile(self, argin):\n self.debug_stream(\"In AddConfigFile()\")\n argout = False\n #----- PROTECTED REGION ID(PlexilPlanStorage.AddConfigFile) ENABLED START -----#\n try:\n path = PyTango.Database().get_class_property(sys.argv[0], \"StorageDirPath\")[\"StorageDirPath\"][0]\n argin = argin.split(\";\")\n source, dest = argin[0], path + argin[1]\n command = 'cp' + ' ' + source + ' ' + dest\n val = subprocess.check_call(command, shell=True)\n if val == 0:\n argout = True\n except Exception as e:\n argout = False\n return argout \n #----- PROTECTED REGION END -----#\t//\tPlexilPlanStorage.AddConfigFile\n return argout",
"def fill_site(self):\n self.init_command.copy_sample_site(self.target_dir)\n self.init_command.create_configuration(self.target_dir)",
"def copy_yaml_and_set_data_dirs(in_path, out_path, data_dir=None):\n from utime.hyperparameters import YAMLHParams\n hparams = YAMLHParams(in_path, no_log=True, no_version_control=True)\n\n # Set values in parameter file and save to new location\n data_ids = (\"train\", \"val\", \"test\")\n for dataset in data_ids:\n path = os.path.join(data_dir, dataset) if data_dir else \"Null\"\n dataset = dataset + \"_data\"\n if hparams.get(dataset) and not hparams[dataset].get(\"data_dir\"):\n hparams.set_value(dataset, \"data_dir\", path, True, True)\n hparams.save_current(out_path)",
"def load(self):\r\n try:\r\n c = open(os.path.dirname(__file__) + \"/../config.yaml\")\r\n except Exception:\r\n c = open(os.path.dirname(__file__) + \"/config.yaml\")\r\n\r\n self.data = yaml.load(c)",
"def __init__(self, wtss_config, wtss_resources=None):\n self.wtss_config=wtss_config\n if wtss_resources:\n self.wtss_resources=wtss_resources\n else:\n try : \n self.wtss_resources=ConfigReader(file=self.wtss_config.data_paths.get('resource_conf'))\n except IOError, e:\n logging.critical(\"Can't find the resources configuration file at %s: exit\"%self.wtssConfig.data_paths.get('resource_conf'))",
"def _copy_resources(self):\n resources = {}\n for _, article in self.config.special_articles.iteritems():\n resources.update(article.full['local_references'])\n for article in self.config.articles_by_date:\n resources.update(article.full['local_references'])\n for source, dest in resources.iteritems():\n dest_file = os.path.join(self.config.output_dir, *dest)\n url = self.config.url + '/'.join(*dest)\n logger.info('Writing resource \\'%s\\'...' % url)\n self.fileproc.copy_file(source, dest_file)",
"def update_from_yaml(\n self, path: str = join(\"config\", \"hdx_user_static.yml\")\n ) -> None:\n super().update_from_yaml(path)",
"def install_resource(from_path, to_path):\n\n if from_path.startswith(\"http\"):\n if not os.path.isfile(to_path):\n install_http_resource(from_path, to_path)\n else:\n install_local_resource(from_path, to_path)",
"def _setup_resources(self, requests, resources, save_state, force_validate,\n base_work_dir, config, enable_debug, skip_init):\n for resource, request in izip(resources, requests):\n\n resource.set_sub_resources()\n\n self._propagate_attributes(resource=resource, config=config,\n save_state=request.save_state and save_state,\n force_validate=request.force_validate or force_validate)\n\n resource.set_work_dir(request.name, base_work_dir)\n resource.logger.debug(\"Resource %r work dir was created under %r\",\n request.name, base_work_dir)\n\n if enable_debug is True:\n resource.enable_debug()\n\n self._initialize_resource(resource, skip_init)\n\n yield (request.name, resource)",
"def add_resources(app):\n api.add_resource(Register, '/register')\n api.add_resource(UserLogin, '/login')\n api.add_resource(LoginOtpGenerate, '/login/otp_generate')\n api.add_resource(LoginOtpVerify, '/login/otp_verify')\n api.add_resource(OauthLogin, '/login/oauth')\n api.add_resource(UserLogout, '/logout')\n api.add_resource(HotelListing, '/hotel_listing')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse a string template or deep-copy a dictionary template.
|
def _get_template_dict(template):
if isinstance(template, str):
return parse_template(template)
if isinstance(template, dict):
return copy.deepcopy(template)
raise ValueError("Input template should be a string or dictionary")
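A minimal usage sketch of the dispatch above, assuming `_get_template_dict` and `parse_template` are in scope and that `parse_template` turns a template string into a dict (the template values here are hypothetical):

# String input is parsed into a dict; dict input is deep-copied so the
# caller's template is never mutated; anything else raises ValueError.
parsed = _get_template_dict('{"Resources": {}}')   # delegates to parse_template
copied = _get_template_dict({"Resources": {}})     # independent deep copy
try:
    _get_template_dict(42)
except ValueError as err:
    print(err)  # Input template should be a string or dictionary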
|
[
"def parse_template(tmplt, type_dict, all_permutations=False, codes={}, read_only_codes=False,\n c=1, w='', must_choose_ind=None):\n if tmplt.startswith('$'):\n if ':' in tmplt:\n _cv = tmplt[1:].partition(':')\n if _cv[0] in codes:\n return parse_template(_cv[2], type_dict, False, codes, read_only_codes, c, w, codes[_cv[0]][0])\n else:\n raise Exception(\"Provided code {} not in codes dictionary in {}.\".format(_cv[0], tmplt))\n elif tmplt[1:] in codes:\n return [codes[tmplt[1:]][1]]\n else:\n raise Exception(\"Invalid format: expected ':' when starting with '$' for input\", tmplt)\n i = 0\n s = len(tmplt)\n sep_inds = [] # alternative values separted by '/'\n open_brackets = 0\n while i < s:\n if tmplt[i] == '/' and open_brackets <= 0:\n sep_inds.append(i)\n elif tmplt[i] == '[':\n open_brackets += 1\n elif tmplt[i] == ']':\n open_brackets -= 1\n i += 1\n\n if len(sep_inds) > 0: # some '/' found outside brackets\n sep_inds = [-1] + sep_inds + [s]\n if all_permutations:\n res = []\n for i in range(1, len(sep_inds)):\n _t = tmplt[sep_inds[i - 1] + 1:sep_inds[i]]\n if i == 1:\n _t = _t.rstrip()\n elif i == len(sep_inds)-1:\n _t = _t.lstrip()\n else:\n _t = _t.strip()\n res += parse_template(_t, type_dict, True)\n return res\n else:\n if must_choose_ind is not None:\n i = must_choose_ind\n else:\n i = rnd.randint(1, len(sep_inds))\n\n _t = tmplt[sep_inds[i - 1] + 1:sep_inds[i]]\n\n if i == 1:\n _t = _t.rstrip()\n elif i == len(sep_inds)-1:\n _t = _t.lstrip()\n else:\n _t = _t.strip()\n\n if not read_only_codes:\n codes[w[:-1]] = (i, _t)\n\n return parse_template(_t, type_dict, False, codes, read_only_codes, c, w)\n\n i = open_brackets = 0\n a = b = -1\n while i < s:\n if tmplt[i] == '[':\n open_brackets += 1\n if a == -1:\n a = i\n elif tmplt[i] == ']':\n open_brackets -= 1\n if a != -1 and open_brackets == 0:\n b = i\n break\n i += 1\n\n if i < s: # some stuff found inside brackets\n if all_permutations:\n res = []\n for rright in parse_template(tmplt[b + 1:], type_dict, True):\n for rmid in parse_template(tmplt[a + 1:b], type_dict, True):\n _rright = rright\n _rmid = rmid\n res.append(tmplt[:a] + _rmid + _rright)\n return res\n else:\n return [tmplt[:a]\n + parse_template(tmplt[a + 1:b], type_dict, False, codes, read_only_codes, 1, w+str(c)+'_')[0]\n + parse_template(tmplt[b + 1:], type_dict, False, codes, read_only_codes, c+1, w)[0]]\n\n # no '/' or brackets found up to this point\n if tmplt in type_dict:\n tmplt = expand_type(tmplt, type_dict)\n return parse_template(tmplt, type_dict, all_permutations, codes, read_only_codes, c, w, must_choose_ind)\n elif tmplt.startswith('range'):\n _range = eval(tmplt)\n _val = str(rnd.randint(_range.start, _range.stop))\n if not read_only_codes:\n codes[w[:-1]] = (1, _val)\n return [_val]\n elif tmplt.startswith('clocktime'):\n if '(' in tmplt:\n _h, _m = eval(tmplt.partition('(')[2].partition(')')[0])\n else:\n _h = rnd.randint(1, 24)\n _m = rnd.randint(0, 60)\n\n if _h > 12:\n _h -= 12\n _tag = 'pm'\n else:\n _tag = 'am'\n\n _val = \"{:01}:{:02}{}\".format(_h, _m, _tag)\n if not read_only_codes:\n codes[w[:-1]] = (1, _val)\n return [_val]\n else:\n return [tmplt]",
"def read_template(self, template, space=None):\n pass",
"def processString(self, template, vars=None, options=None):\n return self.process(Literal(template), vars, options)",
"def parse_file(vars_dicts, src, dest, ignore_undefined=False):\n source_fh = open(src, 'rb')\n dest_fh = open(dest, 'wb')\n\n if DEBUG:\n print \"Processing {} into {}\".format(source_fh, dest_fh)\n\n if ignore_undefined:\n template = Template(source_fh.read())\n else:\n template = Template(source_fh.read(), undefined=StrictUndefined)\n applied_template = template.render(vars_dict)\n dest_fh.write(applied_template)",
"def apply_template(template, subst):\n\t\n\tif not template:\n\t\tprint 'xx> No template given'\n\t\treturn None\n\t\n\t# apply the known items\n\tapplied = template\n\tfor k, v in subst.iteritems():\n\t\tapplied = re.sub('\\{\\{\\s*' + k + '\\s*\\}\\}', v if v else '', applied)\n\t\n\t# remove unknown items\n\tmatches = re.findall('\\{\\{[^\\}]+\\}\\}', applied)\n\tif matches is not None:\n\t\tfor match in matches:\n\t\t\tapplied = applied.replace(match, '')\n\t\n\treturn applied",
"def _parse_context_string(data : dict, value : str) -> Any:\n # Find all context values in string.\n contexts = re.findall(\"({[<%#:]{1} [\\S]+ [%#:>]{1}})\", value)\n # If there is no any context values in string,\n # return the string itself.\n if len(contexts) == 0:\n return value\n # If value is just a context value, \n # return the value of the context item instead of a string.\n if len(contexts) == 1 and value.strip() == contexts[0]:\n return ConduitStep._parse_context_tag(data, contexts[0])\n else:\n val = value\n for item in contexts:\n val = ConduitStep._parse_context_string(data, val.replace(item, ConduitStep._parse_context_tag(data, item)))\n return val",
"def process_string(self, string):\n template = self.tpl_env.from_string(string)\n out = template.render(self.tpl_context)\n return out",
"def option_template_parse(\n self,\n packed_data, # type: struct\n sensor, # type: str\n pointer # type: int\n ):\n (option_template_id,option_scope_length,option_length) = self.unpack('!HHH',packed_data[pointer:pointer+6])\n pointer += 6 # Move ahead 6 bytes\n \n cache = {}\n hashed_id = hash(str(sensor)+str(option_template_id)) # Hash for individual sensor and template ID\n cache[hashed_id] = {}\n cache[hashed_id][\"Sensor\"] = str(sensor)\n cache[hashed_id][\"Template ID\"] = option_template_id\n cache[hashed_id][\"Type\"] = \"Options Template\"\n cache[hashed_id][\"Scope Fields\"] = self.OrderedDict()\n cache[hashed_id][\"Option Fields\"] = self.OrderedDict()\n\n for x in range(pointer,pointer+option_scope_length,4):\n (scope_field_type,scope_field_length) = self.unpack('!HH',packed_data[x:x+4])\n cache[hashed_id][\"Scope Fields\"][scope_field_type] = scope_field_length\n \n pointer += option_scope_length\n\n for x in range(pointer,pointer+option_length,4):\n (option_field_type,option_field_length) = self.unpack('!HH',packed_data[x:x+4])\n cache[hashed_id][\"Option Fields\"][option_field_type] = option_field_length\n \n pointer += option_length\n return cache",
"def parse_string(string, data):\n jenv = jinja2.Environment(loader=jinja2.BaseLoader()).from_string(string)\n _prepare_jenv(jenv)\n return jenv.render(**data)",
"def interpolate_values(cls, string, json_dict):\n str_cpy = string\n try:\n pattern = re.compile(r\"{{[a-z|.|_]*}}\")\n for match in re.findall(pattern, str_cpy):\n match_str = match.replace('{{', '').replace('}}', '')\n value = cls.get_by_complex_key(json_dict, match_str)\n str_cpy = str_cpy.replace(match, str(value))\n except Exception as e:\n print(\"Error interpolating values for string: \" + string)\n print(e)\n raise Exception(e)\n # print(\"Interpolated string: \" + str_cpy)\n return str_cpy",
"def parse(self):\n for line in self.template_string.split('\\n'):\n split_line = tag_re.split(line)\n if len(split_line) > 1:\n for matched in split_line:\n mat = tag_re.search(matched)\n if mat:\n full_command = mat.group(0)\n cmd = mat.group(2).split()[0].strip() #get_comment_form etc\n if cmd == 'load':\n self.loaded_classes.append(full_command)\n else:\n if cmd not in DEFAULT_TAGS and cmd not in 'end'.join(DEFAULT_TAGS):\n self.template_calls.append(full_command)",
"def _auto_parse(data_matrix, template, ignore_dict={}):\n # Get all placeholders\n placeholders = re.findall(r\"\\$autoparse_\\d{2}\", template)\n\n placelement_map = {}\n placelement_map.update(ignore_dict)\n\n for placeholder in placeholders:\n pos_x = int(re.findall(r\"\\d\", placeholder)[0])\n pos_y = int(re.findall(r\"\\d\", placeholder)[1])\n try:\n placelement_map[placeholder[1:]] = saxutils.escape(data_matrix[pos_x][pos_y])\n except IndexError as _:\n logging.fatal(f\"Invalid auto-parse placeholder: {placeholder}\")\n exit(1)\n \n try:\n return string.Template(template).substitute(placelement_map)\n except KeyError as e:\n logging.fatal(f\"Invalid auto-parse placeholder: {e}\")\n exit(1)",
"def process(self, data):\n if not self.template:\n fh = open(self.file, 'r')\n self.template = fh.read()\n fh.close()\n\n for param in self.params:\n if not data.has_key(param):\n raise ValueError(\"Missing required template parameter %s.\" % param)\n\n return self.template % data",
"def parseTemplate(self, template):\n\n pos = 0\n result = []\n while 1:\n match = self.tag_re.search(template, pos)\n if match is None:\n break\n\n pos = match.end()\n tag = match.group(1)\n\n fields = self.parseTag(tag)\n if fields is None:\n continue\n\n params = {}\n params['tag'] = tag\n params.update(fields);\n\n result.append(params)\n\n return result if len(result) else None",
"def parse_email(template, **args):\n with open(template, 'r') as f:\n text = f.read()\n personalized_template = text.format(**args)\n tokens = tokenize(personalized_template, RE, IGNORE)\n header = {k:v.strip() for k, v in tokens\n if k in HEADER_LABELS}\n body = tokens[-1][-1].strip()\n email = {'Destination': {'ToAddresses': [header['TO']]},\n 'Message': {'Body': {'Text': {'Charset': 'UTF-8', 'Data': body}},\n 'Subject': {'Charset': 'UTF-8', 'Data': header['SUBJECT']}},\n 'Source': header['FROM']}\n region = header['REGION']\n return email, region",
"def _parse_line(self, line):\n pattern = r'{{(.*?)}}'\n line = re.sub(pattern, self.resolve_key, line)\n\n return line",
"def parse_template(template, substitutions):\n s = Template(template)\n html = \"\"\n\n try:\n html = s.substitute(substitutions)\n except Exception, e:\n log_out(\"Error parsing template\")\n\n return html",
"def direct_from_string(text: str) -> dict:\n return PlainTextObject(text=text).to_dict()",
"def preprocess(template_filename: str) -> Tuple[Dict[str, Operand], List[str]]:\n operand_to_type_mapping: Dict[str, Operand] = {}\n code: List[str] = []\n \n with open(template_filename) as template:\n for line in template:\n with RegexSwitch(line.strip()) as case:\n if case(_PREPROCESS_DEFINE):\n match = _PREPROCESS_DEFINE.fullmatch(line.strip())\n operand, operand_type = map(str.strip, match.group(1).split('='))\n with Switch(operand_type) as case:\n if case('CONST'):\n operand_to_type_mapping[operand] = Operand.CONST\n elif case('MEM'):\n operand_to_type_mapping[operand] = Operand.MEM\n elif case('REG32'):\n operand_to_type_mapping[operand] = Operand.REG32\n elif case('REG16'):\n operand_to_type_mapping[operand] = Operand.REG16\n else:\n raise TypeError(f'{operand_type} is not supported by the parser.')\n elif case(_PREPROCESS_COMMENT):\n continue\n elif line.strip() != '':\n code.append(line)\n\n return (operand_to_type_mapping, code)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct the request body to create an application.
|
def _create_application_request(app_metadata, template):
app_metadata.validate(["author", "description", "name"])
request = {
"Author": app_metadata.author,
"Description": app_metadata.description,
"HomePageUrl": app_metadata.home_page_url,
"Labels": app_metadata.labels,
"LicenseBody": app_metadata.license_body,
"LicenseUrl": app_metadata.license_url,
"Name": app_metadata.name,
"ReadmeBody": app_metadata.readme_body,
"ReadmeUrl": app_metadata.readme_url,
"SemanticVersion": app_metadata.semantic_version,
"SourceCodeUrl": app_metadata.source_code_url,
"SpdxLicenseId": app_metadata.spdx_license_id,
"TemplateBody": template,
}
# Remove None values
return {k: v for k, v in request.items() if v}
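To illustrate the None-filtering in the return statement, a hedged sketch follows; the metadata object is a hypothetical stand-in built with SimpleNamespace, not the real app_metadata class:

from types import SimpleNamespace

# Only truthy fields survive the final dict comprehension, so optional
# metadata that was never provided is simply omitted from the request body.
meta = SimpleNamespace(author="Jane", description="Demo app", name="demo",
                       home_page_url=None, labels=None, license_body=None,
                       license_url=None, readme_body=None, readme_url=None,
                       semantic_version="1.0.0", source_code_url=None,
                       spdx_license_id=None, validate=lambda fields: None)
body = _create_application_request(meta, template='{"Resources": {}}')
# body == {"Author": "Jane", "Description": "Demo app", "Name": "demo",
#          "SemanticVersion": "1.0.0", "TemplateBody": '{"Resources": {}}'}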
|
[
"def _create_application(\n self,\n name,\n client_type=None,\n grant_type=None,\n capability=None,\n user=None,\n data_access_type=None,\n end_date=None,\n **kwargs\n ):\n client_type = client_type or Application.CLIENT_PUBLIC\n grant_type = grant_type or Application.GRANT_PASSWORD\n # This is the user to whom the application is bound.\n dev_user = user or User.objects.create_user(\"dev\", password=\"123456\")\n application = Application.objects.create(\n name=name,\n user=dev_user,\n client_type=client_type,\n authorization_grant_type=grant_type,\n **kwargs\n )\n\n if data_access_type:\n application.data_access_type = data_access_type\n\n if end_date:\n application.end_date = end_date\n\n if data_access_type or end_date:\n application.save()\n\n # add capability\n if capability:\n application.scope.add(capability)\n return application",
"def create_applicant():\n uid = uniqid.generate()\n name = bottle.request.json['name']\n applicant = itw.Applicant.create(uid=uid, name=name)\n return json.dumps(applicant.json, indent=\" \")",
"def create_app(instanceAddress, appName, description, permission=[\n 'read:account',\n 'write:account',\n 'read:blocks',\n 'write:blocks',\n 'read:drive',\n 'write:drive',\n 'read:favorites',\n 'write:favorites',\n 'read:following',\n 'write:following',\n 'read:messaging',\n 'write:messaging',\n 'read:mutes',\n 'write:mutes',\n 'write:notes',\n 'read:notifications',\n 'write:notifications',\n 'read:reactions',\n 'write:reactions',\n 'write:votes'\n], callbackUrl=None): # pragma: no cover\n res = requests.post(f\"https://{instanceAddress}/api/app/create\", data=json.dumps({'name': appName, 'description': description, 'permission': permission, 'callbackUrl': callbackUrl}), headers={'content-type': 'application/json'})\n\n if res.status_code != 200:\n raise MisskeyAPIException('/app/create', 200, res.status_code, res.text)\n else:\n return json.loads(res.text)",
"def _create_application(self):\n key = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n\n if not self.interactive:\n name = 'tortuga-{}'.format(key)\n else:\n name = ''\n while not name:\n name = input(self.format('Application name: '))\n name = name.strip()\n\n if not self.interactive:\n url = 'https://univa.com/tortuga/{}'.format(key)\n else:\n url = ''\n while not url_valid(url):\n url = input(self.format('Application URL (a unique URI): '))\n\n password = secrets.token_urlsafe()\n\n print('Creating application...')\n\n try:\n application = self._run_az([\n 'ad', 'app', 'create',\n '--display-name', name,\n '--native-app', 'false',\n '--identifier-uris', url,\n '--key-type', 'Password',\n '--password', password\n ])\n #\n # Attach password to the application object so we can refer to\n # it later.\n #\n application['password'] = password\n self._az_applications.append(application)\n\n except APIError as e:\n print(self.format_error(str(e)))\n return self._create_application()\n\n #\n # Create the Service Principal\n #\n print('Creating service principal...')\n\n self._run_az([\n 'ad', 'sp', 'create',\n '--id', application['appId']\n\n ])\n\n print(self.format('The following application API password was '\n 'generated: {}', password))\n\n return application",
"def _create_application_version_request(app_metadata, application_id, template):\n app_metadata.validate([\"semantic_version\"])\n request = {\n \"ApplicationId\": application_id,\n \"SemanticVersion\": app_metadata.semantic_version,\n \"SourceCodeUrl\": app_metadata.source_code_url,\n \"TemplateBody\": template,\n }\n return {k: v for k, v in request.items() if v}",
"def create_req(self):\n \n pass",
"def app_new(input_params={}, always_retry=False, **kwargs):\n return DXHTTPRequest('/app/new', input_params, always_retry=always_retry, **kwargs)",
"def build(self, api_spec, request_data):\n pass",
"def buildRequest(self, payload):\n # type: (bytearray) -> bytearray\n pass",
"def create_request(self):\n date_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')\n present_time = date_time[0:-3] + 'Z'\n # Using the web service post() method to create request\n response = requests.post(url=bid_url, headers={'Authorization': self.api_key}, json={\n \"type\": self.bid_type.get(),\n \"initiatorId\": self.current_user.id,\n \"dateCreated\": present_time,\n \"subjectId\": Subject().get_id_by_name(self.subject.get()),\n \"additionalInfo\": {\"competency\": self.competency.get(), \"hours_per_week\": self.hours_per_session.get(),\n \"sessions_per_week\": self.sessions_per_week.get(),\n \"rate_per_session\": self.rate_per_session.get()}\n }\n )\n json_data = response.json()\n # Destroying current window and jumping to next screen by calling the main() method from the NewRequestDetails \n # class\n self.window.destroy()\n NewRequestDetails(json_data).main()",
"def _update_application_request(app_metadata, application_id):\n request = {\n \"ApplicationId\": application_id,\n \"Author\": app_metadata.author,\n \"Description\": app_metadata.description,\n \"HomePageUrl\": app_metadata.home_page_url,\n \"Labels\": app_metadata.labels,\n \"ReadmeBody\": app_metadata.readme_body,\n \"ReadmeUrl\": app_metadata.readme_url,\n }\n return {k: v for k, v in request.items() if v}",
"def create_message(self):\n request = self.create_request()\n headers = self.create_header_str()\n data = self.body\n return \"%s%s\\r\\n%s\" % (request, headers, data)",
"def create(user, args):\n # return jsonify(user.apps)\n\n app = App.create_for_user(\n user,\n args[\"name\"],\n description=args[\"description\"],\n url=args[\"url\"],\n image_url=args[\"image\"],\n )\n\n return app",
"def create_application(cls,\n client: algod.AlgodClient,\n creator_private_key: str,\n approval_program: bytes,\n clear_program: bytes,\n global_schema: algo_txn.StateSchema,\n local_schema: algo_txn.StateSchema,\n app_args: Optional[List[Any]],\n sign_transaction: bool = True) -> Union[Transaction, SignedTransaction]:\n creator_address = algo_acc.address_from_private_key(private_key=creator_private_key)\n suggested_params = get_default_suggested_params(client=client)\n\n txn = algo_txn.ApplicationCreateTxn(sender=creator_address,\n sp=suggested_params,\n on_complete=algo_txn.OnComplete.NoOpOC.real,\n approval_program=approval_program,\n clear_program=clear_program,\n global_schema=global_schema,\n local_schema=local_schema,\n app_args=app_args)\n\n if sign_transaction:\n txn = txn.sign(private_key=creator_private_key)\n\n return txn",
"def room_create(self):\n\t\treturn self.app.put('/room/create')",
"def build_resource(self):\n\n self.params = self.get_request_parameters()\n if self.params:\n self.endpoint = self.endpoint + '?' + urllib.urlencode(self.params)",
"def create_application(fv_tenant, application, **args):\n args = args['optional_args'] if 'optional_args' in args.keys() else args\n\n fv_ap = Ap(fv_tenant, application,\n prio=get_value(args, 'prio', DEFAULT_QOS).lower())\n return fv_ap",
"def create_blank_request(*args, **kwargs):\n if isinstance(kwargs.get('body'), str):\n kwargs['body'] = kwargs['body'].encode('utf8')\n return webob.Request.blank(*args, **kwargs)",
"def CreateSubAppId(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateSubAppId\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateSubAppIdResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct the request body to update application.
|
def _update_application_request(app_metadata, application_id):
request = {
"ApplicationId": application_id,
"Author": app_metadata.author,
"Description": app_metadata.description,
"HomePageUrl": app_metadata.home_page_url,
"Labels": app_metadata.labels,
"ReadmeBody": app_metadata.readme_body,
"ReadmeUrl": app_metadata.readme_url,
}
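    # Drop keys with falsy values (None or empty) so only provided fields are sent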
return {k: v for k, v in request.items() if v}
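
# Illustrative usage sketch (an assumption, not part of the original snippet): the
# filtered dict is shaped to be splatted into the boto3 Serverless Application
# Repository client's update_application call.
def _example_update_application(app_metadata, application_id):  # hypothetical helper
    import boto3
    sar_client = boto3.client("serverlessrepo")
    return sar_client.update_application(**_update_application_request(app_metadata, application_id))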
|
[
"def _create_application_request(app_metadata, template):\n app_metadata.validate([\"author\", \"description\", \"name\"])\n request = {\n \"Author\": app_metadata.author,\n \"Description\": app_metadata.description,\n \"HomePageUrl\": app_metadata.home_page_url,\n \"Labels\": app_metadata.labels,\n \"LicenseBody\": app_metadata.license_body,\n \"LicenseUrl\": app_metadata.license_url,\n \"Name\": app_metadata.name,\n \"ReadmeBody\": app_metadata.readme_body,\n \"ReadmeUrl\": app_metadata.readme_url,\n \"SemanticVersion\": app_metadata.semantic_version,\n \"SourceCodeUrl\": app_metadata.source_code_url,\n \"SpdxLicenseId\": app_metadata.spdx_license_id,\n \"TemplateBody\": template,\n }\n # Remove None values\n return {k: v for k, v in request.items() if v}",
"def _create_application_version_request(app_metadata, application_id, template):\n app_metadata.validate([\"semantic_version\"])\n request = {\n \"ApplicationId\": application_id,\n \"SemanticVersion\": app_metadata.semantic_version,\n \"SourceCodeUrl\": app_metadata.source_code_url,\n \"TemplateBody\": template,\n }\n return {k: v for k, v in request.items() if v}",
"def on_put(self, req, resp, appid):\n mapper = self.meta.get('mapper')\n app = mapper.application.Application.get_by_uid(appid)\n if app is None:\n raise falcon.HTTPInvalidParam('Application not found', 'appid')\n \n body = req.context['body']\n # look for changes to name, description, status, parameters, and data\n if 'name' in body:\n app.set_name(body['name'].strip())\n if 'description' in body:\n app.set_description(body['description'].strip())\n if 'status' in body:\n app.set_status(body['status'].strip())\n if 'jwt_secret' in body:\n app.set_jwt_secret(body['jwt_secret'].strip())\n if 'custom_data' in body and isinstance(body['custom_data'], dict):\n app.set_custom_data(body['custom_data'])\n if 'data' in body and isinstance(body['data'], list):\n # body['data'] = [{'key': 'spam', 'value': 'eggs'}, ...]\n app.set_data(body['data'])\n if 'parameters' in body and isinstance(body['parameters'], list):\n # body['parameters'] = [{'key': 'spam', 'datatype': 'and', 'default': 'eggs', 'description': 'spam and eggs'}, ...]\n app.set_paramameters(body['params'])\n \n app = mapper.application.Application.update_from_object(app)\n resp.body = {\"application\": app.to_dict()}\n return True",
"def build_request_body(\n properties_to_be_updated: Optional[Dict[str, Any]] = None,\n properties_to_be_removed: Optional[List[str]] = None) -> Dict[str, Any]:\n body = copy.deepcopy(_VALID_SINGLE_PRODUCT)\n product = body['entries'][0]['product']\n\n if properties_to_be_updated:\n for key, value in properties_to_be_updated.items():\n product[key] = value\n\n if properties_to_be_removed:\n for key in properties_to_be_removed:\n if key in product:\n del product[key]\n\n return body",
"def updateApp():\n newConfig = json.loads(request.data)\n logger.info('Method called with: {0}'.format(newConfig))\n\n system = ServiceOrchestrator(CONFIG_FILE)\n ok = system.updateApps(newConfig)\n\n return 'ok'",
"def get_update_command_body(args: Dict[str, Any], update_mask: List) -> Dict:\n body = {} # type:Dict[str,Any]\n if 'labels' in update_mask:\n # Add label dictionary to body\n body['labels'] = arg_dict_creator(args.get('labels'))\n\n if 'next_rotation_time' in update_mask:\n if str(args.get('next_rotation_time')).isdigit():\n # If next_rotation_time given is a timestamp enter it as is\n body['next_rotation_time'] = {'seconds': int(str(args.get('next_rotation_time')))}\n\n else:\n # If next_rotation_time is date string, convert it to timestamp\n body['next_rotation_time'] = {'seconds': int(datetime.strptime(str(args.get('next_rotation_time')),\n RFC3339_DATETIME_FORMAT).timestamp())}\n\n if 'purpose' in update_mask:\n # Add purpose enum to body\n body['purpose'] = kms.CryptoKey.CryptoKeyPurpose[args.get('purpose')].value\n\n if 'rotation_period' in update_mask:\n # Add rotation_period to body\n body['rotation_period'] = {'seconds': int(str(args.get('rotation_period')))}\n\n if 'primary.attestation' in update_mask or 'primary.state' in update_mask:\n # Init the 'primary' sub-dictionary\n body['primary'] = {}\n\n if 'primary.attestation' in update_mask:\n # Add attestation dict to 'primary' sub-dictionary\n body['primary']['attestation'] = arg_dict_creator(args.get('attestation'))\n\n if 'primary.state' in update_mask:\n # Add state enum to 'primary' sub-dictionary\n body['primary']['state'] = kms.CryptoKeyVersion.CryptoKeyVersionState[args.get('state')].value\n\n if 'version_template.algorithm' in update_mask or 'version_template.protection_level' in update_mask:\n # Init the 'version_template' sun-dictionary\n body['version_template'] = {}\n\n if 'version_template.algorithm' in update_mask:\n # Add algorithm enum to 'version_template' sun-dictionary\n val = kms.CryptoKeyVersion.CryptoKeyVersionAlgorithm[args.get('algorithm')].value\n body['version_template']['algorithm'] = val\n\n if 'version_template.protection_level' in update_mask:\n # Add protection_level to 'version_template' sun-dictionary\n val = kms.ProtectionLevel[args.get('protection_level')].value\n body['version_template']['protection_level'] = val\n\n return body",
"def build(self, api_spec, request_data):\n pass",
"def update_request():",
"def buildRequest(self, payload):\n # type: (bytearray) -> bytearray\n pass",
"def application_requestapi(self, application_requestapi):\n\n self._application_requestapi = application_requestapi",
"def __get_payload(self):\n payload = {}\n if self.http_method in ('POST', 'PUT'):\n payload = parse_body(self.request.body)\n else:\n for key, value in self.request.params.items():\n payload[key] = value\n return payload",
"def get_app_json(self):\n return {\n 'app_id': self.proj.app_id,\n 'app_package': self.package_name,\n 'app_version': str(self.version),\n 'app_project': self.proj.proj_name,\n 'app_language': 'Java'\n }",
"async def updatePlatformConfig(self, body=\"\"):\n payload = {}\n \n\n # Parameter validation\n schema = UserValidator.updatePlatformConfig()\n schema.dump(schema.load(payload))\n \n # Body validation\n from .models import PlatformSchema\n schema = PlatformSchema()\n schema.dump(schema.load(body))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/user/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/platform/config\", \"\"\"{\"required\":[{\"name\":\"company_id\",\"in\":\"path\",\"description\":\"Numeric ID allotted to a business account on Fynd Platform.\",\"required\":true,\"schema\":{\"type\":\"string\"}},{\"name\":\"application_id\",\"in\":\"path\",\"description\":\"Alphanumeric ID allotted to an application created within a business account.\",\"required\":true,\"schema\":{\"type\":\"string\"}}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"name\":\"company_id\",\"in\":\"path\",\"description\":\"Numeric ID allotted to a business account on Fynd Platform.\",\"required\":true,\"schema\":{\"type\":\"string\"}},{\"name\":\"application_id\",\"in\":\"path\",\"description\":\"Alphanumeric ID allotted to an application created within a business account.\",\"required\":true,\"schema\":{\"type\":\"string\"}}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"POST\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"post\", await create_url_without_domain(f\"/service/platform/user/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/platform/config\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body)",
"def requestBody(self):\n return self._body",
"def updateRequest(self, paramsFromResponse, extraParams):\r\n\r\n # https://portswigger.net/burp/extender/api/constant-values.html\r\n PARAM_BODY = 0x01\r\n PARAM_URL = 0x00\r\n\r\n request = self._requestResponse.getRequest()\r\n\r\n # loop over all the unique parameters that we scraped from the response\r\n for param in extraParams:\r\n\r\n # create a corresponding burp IParameter\r\n # weird stuff happens if there are spaces. not sure if other\r\n # characters will cause problems, but I think URL encoding could\r\n # interfere with the scanner so I'm trying to avoid it\r\n value = paramsFromResponse[param].replace(' ', '+')\r\n burpParam = helpers.buildParameter(param,\r\n value,\r\n PARAM_URL)\r\n # add it to the request\r\n request = helpers.addParameter(request, burpParam)\r\n\r\n return request",
"def update(self, body):\n self.body = body",
"def build_resource(self):\n\n self.params = self.get_request_parameters()\n if self.params:\n self.endpoint = self.endpoint + '?' + urllib.urlencode(self.params)",
"def update_asn(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_asn\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `update_asn`\")\n\n resource_path = '/beta/asn'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def build_replica_request(self) -> Request:\n request = Request()\n\n # Details\n request.version = self.request.version\n request.remoteIp = self.request.remote_ip\n request.protocol = self.request.protocol\n request.host = self.request.host\n request.hostName = self.request.host_name\n request.port = self.request.server_connection.stream.socket.getsockname()[1]\n request.uri = self.request.uri\n\n # Method\n request.method = self.request.method\n\n # Path\n request.set_path(self.request.path)\n\n # Headers\n for key, value in self.request.headers._dict.items():\n request.headers[key] = value\n request.headers[key.lower()] = value\n\n # Query String\n for key, value in self.request.query_arguments.items():\n request.queryString[key] = [x.decode() for x in value]\n if len(request.queryString[key]) == 1:\n request.queryString[key] = request.queryString[key][0]\n\n # Body\n if self.request.body_arguments:\n request.mimeType = 'application/x-www-form-urlencoded'\n for key, value in self.request.body_arguments.items():\n try:\n request.bodyType[key] = 'str'\n request.body[key] = [x.decode() for x in value]\n except (AttributeError, UnicodeDecodeError):\n request.bodyType[key] = BASE64\n request.body[key] = [_b64encode(x) for x in value]\n if len(request.body[key]) == 1:\n request.body[key] = request.body[key][0]\n elif self.request.files:\n request.mimeType = 'multipart/form-data'\n for key, value in self.request.files.items():\n try:\n request.bodyType[key] = 'str'\n request.body[key] = [x.body.decode() for x in value]\n except (AttributeError, UnicodeDecodeError):\n request.bodyType[key] = BASE64\n request.body[key] = [_b64encode(x.body) for x in value]\n if len(request.body[key]) == 1:\n request.body[key] = request.body[key][0]\n else:\n request.mimeType = 'text/plain'\n try:\n request.bodyType = 'str'\n request.body = self.request.body.decode()\n except (AttributeError, UnicodeDecodeError):\n request.bodyType = BASE64\n request.body = _b64encode(self.request.body)\n request.bodySize = len(self.request.body)\n\n # Files\n request.files = self.request.files\n\n return request"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct the request body to create application version.
|
def _create_application_version_request(app_metadata, application_id, template):
app_metadata.validate(["semantic_version"])
request = {
"ApplicationId": application_id,
"SemanticVersion": app_metadata.semantic_version,
"SourceCodeUrl": app_metadata.source_code_url,
"TemplateBody": template,
}
return {k: v for k, v in request.items() if v}
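
# Minimal sketch of the falsy-value filtering (hypothetical metadata stand-in,
# not part of the original snippet): keys whose values are None are dropped.
if __name__ == "__main__":
    from types import SimpleNamespace
    meta = SimpleNamespace(
        validate=lambda fields: None,   # stand-in for the real metadata validator
        semantic_version="1.0.1",
        source_code_url=None,           # falsy, so the key is omitted from the body
    )
    body = _create_application_version_request(meta, "arn:aws:serverlessrepo:app", "{}")
    assert "SourceCodeUrl" not in body and body["SemanticVersion"] == "1.0.1"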
|
[
"def create_application_version(self, version_name, version_date, application_id):\n params = {'versionName' : version_name, 'versionDate' : version_date}\n return self._request('POST', 'rest/applications/' + str(application_id) + '/version', params)",
"def _create_application_request(app_metadata, template):\n app_metadata.validate([\"author\", \"description\", \"name\"])\n request = {\n \"Author\": app_metadata.author,\n \"Description\": app_metadata.description,\n \"HomePageUrl\": app_metadata.home_page_url,\n \"Labels\": app_metadata.labels,\n \"LicenseBody\": app_metadata.license_body,\n \"LicenseUrl\": app_metadata.license_url,\n \"Name\": app_metadata.name,\n \"ReadmeBody\": app_metadata.readme_body,\n \"ReadmeUrl\": app_metadata.readme_url,\n \"SemanticVersion\": app_metadata.semantic_version,\n \"SourceCodeUrl\": app_metadata.source_code_url,\n \"SpdxLicenseId\": app_metadata.spdx_license_id,\n \"TemplateBody\": template,\n }\n # Remove None values\n return {k: v for k, v in request.items() if v}",
"def create_version(factor):\n # TODO: validate input\n code_file = request.files['code'].read()\n version = request.form.get(\"version\")\n\n err = name_node.create_version(factor, version, code_file)\n return resp_maker.make_response(err)",
"def create_application_version():\n beanstalk = boto3.client('elasticbeanstalk', region_name=os.environ['TF_VAR_aws_region'])\n application_not_found_re = r'^No Application named .*? found.$'\n\n try:\n beanstalk.create_application_version(\n ApplicationName=os.environ['TF_VAR_elastic_beanstalk_application_name'],\n VersionLabel=os.environ['TF_VAR_elastic_beanstalk_application_version'],\n SourceBundle={\n 'S3Bucket': os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n 'S3Key': os.environ['TF_VAR_elastic_beanstalk_s3_key']\n }\n )\n except botocore.exceptions.ClientError as e:\n if re.match(application_not_found_re, e.response['Error']['Message']):\n pass\n else:\n raise e",
"def build(self, api_spec, request_data):\n pass",
"def buildRequest(self, payload):\n # type: (bytearray) -> bytearray\n pass",
"def create_application_version(self, application_name, version_name, application_template, description,\n application_id=None):\n\n response = self.api.create_application_version(application_name=application_name,\n application_id=application_id,\n application_template=application_template,\n version_name=version_name,\n description=description)\n APIHelper().check_for_response_errors(response)\n return response.data['data']['id']",
"def create_build(Name=None, Version=None, StorageLocation=None, OperatingSystem=None):\n pass",
"def create_req(self):\n \n pass",
"def build_resource(self):\n\n self.params = self.get_request_parameters()\n if self.params:\n self.endpoint = self.endpoint + '?' + urllib.urlencode(self.params)",
"def test_create_version(self):\n response = self.client.open(\n '//models/{type}/versions'.format(type='type_example'),\n method='POST')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def _payload(self, **kwargs):\n\n self._payload_allowed_values(**kwargs)\n\n versions = [{\"name\": ver} for ver in kwargs[\"Versions\"]]\n\n if \"Components\" in kwargs:\n components = [{\"name\": com} for com in kwargs[\"Components\"]]\n else:\n components = []\n\n payload = {\n \"fields\": {\n \"project\":\n {\n \"key\": kwargs[\"Project\"]\n },\n \"summary\": kwargs[\"Summary\"],\n \"description\": kwargs[\"Description\"],\n \"issuetype\": {\n \"name\": kwargs[\"Issuetype\"]\n },\n \"components\": components,\n \"customfield_15160\": {\n \t\"value\": kwargs[\"Primary Component\"]\n },\n \"fixVersions\": [{\n \t\"name\": \"Triage\"\n }],\n \"customfield_10011\": [{\n \t\"value\": kwargs[\"Impact\"]\n }],\n \"versions\": versions,\n \"priority\":{\n \t\"name\": kwargs[\"Priority\"]\n }\n }\n }\n return payload",
"def add_version(self, data, **kwargs):\n data.update({\"schema_version\": self.SCHEMA_VERSION})\n return data",
"async def create_projectversions(request):\n params = await request.json()\n\n name = params.get(\"name\")\n architectures = params.get(\"architectures\", [])\n basemirror = params.get(\"basemirror\")\n project_id = parse_int(request.match_info[\"project_id\"])\n\n if not project_id:\n return web.Response(status=400, text=\"No valid project id received\")\n if not name:\n return web.Response(status=400, text=\"No valid name for the projectversion recieived\")\n if not basemirror or not (\"/\" in basemirror):\n return web.Response(status=400, text=\"No valid basemirror received (format: 'name/version')\")\n if not architectures:\n return web.Response(status=400, text='No valid architecture received')\n\n if not is_name_valid(name):\n return web.Response(status=400, text=\"Invalid project name!\")\n\n basemirror_name, basemirror_version = basemirror.split(\"/\")\n project = request.cirrina.db_session.query(Project).filter(Project.id == project_id).first()\n\n if not project:\n return web.Response(status=400, text=\"Project with id '{}' could not be found\".format(project_id))\n\n projectversion = (\n request.cirrina.db_session.query(ProjectVersion)\n .join(Project)\n .filter(ProjectVersion.name == name)\n .filter(Project.id == project.id)\n .first()\n )\n if projectversion:\n return web.Response(status=400, text=\"Projectversion already exists. {}\".format(\n \"And is marked as deleted!\" if projectversion.is_deleted else \"\"))\n\n buildvariants = get_buildvariants(request.cirrina.db_session, basemirror_name, basemirror_version, architectures)\n\n projectversion = ProjectVersion(name=name, project=project)\n projectversion.buildvariants = buildvariants\n request.cirrina.db_session.add(projectversion)\n request.cirrina.db_session.commit()\n\n logger.info(\"ProjectVersion '%s/%s' with id '%s' added\",\n projectversion.project.name,\n projectversion.name,\n projectversion.id,\n )\n\n project_name = projectversion.project.name\n project_version = projectversion.name\n\n await request.cirrina.aptly_queue.put({\"init_repository\": [\n projectversion.id,\n basemirror_name,\n basemirror_version,\n project_name,\n project_version,\n architectures]})\n\n return web.json_response({\"id\": projectversion.id, \"name\": projectversion.name})",
"def app_new(input_params={}, always_retry=False, **kwargs):\n return DXHTTPRequest('/app/new', input_params, always_retry=always_retry, **kwargs)",
"def create_app(instanceAddress, appName, description, permission=[\n 'read:account',\n 'write:account',\n 'read:blocks',\n 'write:blocks',\n 'read:drive',\n 'write:drive',\n 'read:favorites',\n 'write:favorites',\n 'read:following',\n 'write:following',\n 'read:messaging',\n 'write:messaging',\n 'read:mutes',\n 'write:mutes',\n 'write:notes',\n 'read:notifications',\n 'write:notifications',\n 'read:reactions',\n 'write:reactions',\n 'write:votes'\n], callbackUrl=None): # pragma: no cover\n res = requests.post(f\"https://{instanceAddress}/api/app/create\", data=json.dumps({'name': appName, 'description': description, 'permission': permission, 'callbackUrl': callbackUrl}), headers={'content-type': 'application/json'})\n\n if res.status_code != 200:\n raise MisskeyAPIException('/app/create', 200, res.status_code, res.text)\n else:\n return json.loads(res.text)",
"def request_version_and_flags(self, req, msg):",
"def build(name, hash, size, uploads):\n p = Payload()\n\n p.name = name\n p.hash = hash\n p.size = size\n p.datetime = str(int(time.time()))\n p.uploads = uploads\n\n return p",
"def _generate_swagger_json(self, app):\n self._paths.extract_from_app(app)\n\n swagger_object = {\n \"swagger\": self.swagger_version,\n \"info\": {\n \"title\": self._title,\n \"version\": self._version\n },\n \"paths\": {}\n }\n self._paths.add_to_spec(swagger_object)\n self._definitions.add_to_spec(swagger_object)\n\n return swagger_object"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check whether the botocore ClientError is ConflictException.
|
def _is_conflict_exception(e):
error_code = e.response["Error"]["Code"]
return error_code == "ConflictException"
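
# Illustrative usage sketch (an assumption, not part of the original snippet):
# swallow a ConflictException from a create call so the caller can fall back to
# an update path, and re-raise anything else.
def _example_create_or_skip(sar_client, request):  # hypothetical helper
    from botocore.exceptions import ClientError
    try:
        return sar_client.create_application(**request)
    except ClientError as e:
        if not _is_conflict_exception(e):
            raise
        return None  # the application already exists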
|
[
"def has_conflict(self,local_path):\n if self.verbose:\n self.log.info(\"(%s)\\n%s\" % (inspect.stack()[0][3],local_path))\n try:\n info = self.client.info2(local_path, recurse=False)\n if not info[0][1]['wc_info']['conflict_work']:\n self.log.error(\"conflict found in %s\" % (local_path))\n return False\n except Exception as e:\n self.log.error(e)\n return True",
"def is_fatal_error(err: ClientResponseError) -> bool:\n if err.status in (401, 409):\n return False\n return 400 <= err.status < 500",
"def _check_for_conflicts(self):\n metric_key = 'conflicts'\n if self._extra_field_names:\n futures_list = []\n with futures.ThreadPoolExecutor(max_workers=self._max_db_connections) as executor:\n if self._supports_imei_shards:\n for name, rstart, rend in partition_utils.physical_imei_shards(self._conn,\n tbl_name=self._staging_tbl_name):\n futures_list.append(executor.submit(self._check_for_conflicts_single_partition, name))\n else:\n futures_list.append(executor.submit(self._check_for_conflicts_single_partition,\n self._staging_tbl_name))\n\n # All futures should be done at this point as with block is exited above\n conflict_rows = []\n for f in futures_list:\n partial_conflicts = f.result()\n conflict_rows.extend(partial_conflicts)\n\n if not conflict_rows:\n return True, 'Conflicting rows check passed', metric_key\n\n confl_rows_sum = 0\n for x in conflict_rows:\n self._logger.debug('Found {count} '\n 'conflicting row(s) with primary key {pk_names}: {pk_values}'\n .format(count=x.dc,\n pk_names=tuple(self._pk_field_names),\n pk_values=tuple(getattr(x, pk) for pk in self._pk_field_names)))\n confl_rows_sum += x.dc\n return False, 'Conflicting rows check failed ({0:d} rows with same primary key and conflicting data)' \\\n .format(confl_rows_sum), metric_key\n\n return True, 'Conflicting rows check skipped due to lack of extra_fields', metric_key",
"def test_other_botocore_error(self):\n thrown_ex = botocore.exceptions.ClientError(\n {\"Error\": {\"Code\": \"500\"}}, \"S#Download\"\n )\n self._download_error_test(thrown_ex, botocore.exceptions.ClientError)",
"def test_integrity_error(self):\n\n client = graphene.test.Client(schema)\n executed = client.execute(self.SH_ADD_ORG,\n context_value=self.context_value)\n\n # Check database\n org = Organization.objects.get(name='Example')\n self.assertEqual(org.name, 'Example')\n\n # Try to insert it twice\n client = graphene.test.Client(schema)\n executed = client.execute(self.SH_ADD_ORG,\n context_value=self.context_value)\n\n msg = executed['errors'][0]['message']\n self.assertEqual(msg, DUPLICATED_ORG_ERROR)",
"def is_stack_already_exists_exception(ex):\n return ex.__class__.__name__ == 'AlreadyExistsException'",
"def _wrap_client_error(e):\n error_code = e.response[\"Error\"][\"Code\"]\n message = e.response[\"Error\"][\"Message\"]\n\n if error_code == \"BadRequestException\":\n if \"Failed to copy S3 object. Access denied:\" in message:\n match = re.search(\"bucket=(.+?), key=(.+?)$\", message)\n if match:\n return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))\n if \"Invalid S3 URI\" in message:\n return InvalidS3UriError(message=message)\n\n return ServerlessRepoClientError(message=message)",
"def is_no_updates_being_performed_exception(ex):\n if isinstance(ex, botocore.exceptions.ClientError):\n error = ex.response.get('Error', {})\n error_message = error.get('Message', 'Unknown')\n return error_message.endswith('No updates are to be performed.')\n else:\n return False",
"def handle_integrity_error(exc):\n if any(\n 'duplicate key value violates unique constraint \"{}\"'.format(constraint) in str(exc)\n for constraint in {\"services_name_key\", \"services_email_from_key\"}\n ):\n return (\n jsonify(\n result=\"error\",\n message={\n \"name\": [\n \"Duplicate service name '{}'\".format(exc.params.get(\"name\", exc.params.get(\"email_from\", \"\")))\n ]\n },\n ),\n 400,\n )\n current_app.logger.exception(exc)\n return jsonify(result=\"error\", message=\"Internal server error\"), 500",
"def test_no_duplicate_servers(self):\n with pytest.raises(corenlp.PermanentlyFailedException):\n with corenlp.CoreNLPClient(annotators=\"tokenize,ssplit\") as duplicate_server:\n raise RuntimeError(\"This should have failed\")",
"def check_scenario_conflicts(self) -> bool:\n\n conflict_dict = self.conflicts\n scenario_projects = [p.project for p in self.project_cards]\n\n for project, conflicts in conflict_dict.items():\n if conflicts:\n for name in conflicts:\n if name in scenario_projects:\n self.project_cards\n WranglerLogger.error(\n \"Projects %s has %s as conflicting project\"\n % (project, name)\n )\n self.has_conflict_error = True\n\n self.conflicts_checked = True\n\n return self.has_conflict_error",
"def testSingleObjectAlreadyExistRemoteError(self):\n old = tasks.rpc.conf['WRAP_REMOTE_ERRORS']\n tasks.rpc.conf['WRAP_REMOTE_ERRORS'] = not old\n return self.testSingleObjectAlreadyExist()",
"def socksclienterror(self) :\n\t\ttry :\n\t\t\treturn self._socksclienterror\n\t\texcept Exception as e:\n\t\t\traise e",
"def on_revocation_error(self, response: requests.Response) -> bool:\n try:\n data = response.json()\n except ValueError:\n return False\n error = data.get(\"error\")\n error_description = data.get(\"error_description\")\n error_uri = data.get(\"error_uri\")\n if error is not None:\n exception_class = self.exception_classes.get(error, RevocationError)\n raise exception_class(error, error_description, error_uri)\n return False",
"def conflict_detection(self) -> Optional[str]:\n return pulumi.get(self, \"conflict_detection\")",
"def _credentials_error(self, error):\n if self._auth_config.local_credentials:\n return False\n return error['__type'] in _REFRESH_EXCEPTIONS",
"def check_sync_conflict_modal_presence(self) -> bool:\n element = \"//h1[contains(text(), 'Sync Conflict')]\"\n if self.check_element_presence(LocatorType.XPath, element, GigantumConstants.ELEMENT_PRESENCE_TIMEOUT.value):\n return True\n return False",
"def test_save_update_boto_error(self):\n resp = deepcopy(self.FAKE_ERROR_RESP)\n resp['Error']['Message'] = 'An error that I cannot handle happened'\n self._cf.update_stack.side_effect = ClientError(resp, '')\n\n # GOTCHA: S3 portion of the code is already covered by test_save_create() and test_save_update_success()\n # Skip through that part.\n with patch('krux_cloud_formation.cloud_formation.CloudFormation._is_stack_exists', MagicMock(return_value=True)):\n with self.assertRaises(ClientError):\n self._cfn.save(self.TEST_STACK_NAME)\n self._cf.update_stack.assert_called_once_with(\n StackName=self.TEST_STACK_NAME,\n TemplateURL=self.FAKE_URL\n )",
"def __CheckOldError(self, response_text):\n error_message_regex = OLD_ERROR_REGEX\n matches = re.search(error_message_regex, response_text)\n if matches:\n message = response_text\n if matches.group(3):\n message = matches.group(3)\n raise AdWordsApiError({'faultstring': message})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrap botocore ClientError exception into ServerlessRepoClientError.
|
def _wrap_client_error(e):
error_code = e.response["Error"]["Code"]
message = e.response["Error"]["Message"]
if error_code == "BadRequestException":
if "Failed to copy S3 object. Access denied:" in message:
match = re.search("bucket=(.+?), key=(.+?)$", message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
if "Invalid S3 URI" in message:
return InvalidS3UriError(message=message)
return ServerlessRepoClientError(message=message)
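
# Illustrative usage sketch (an assumption, not part of the original snippet):
# translate the low-level botocore error into the library-specific exception
# before surfacing it to callers.
def _example_call_with_wrapping(sar_client, request):  # hypothetical helper
    from botocore.exceptions import ClientError
    try:
        return sar_client.update_application(**request)
    except ClientError as e:
        raise _wrap_client_error(e)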
|
[
"def _mock_boto3_kwargs_exception(*args, **kwargs):\n raise ClientError(operation_name=\"\", error_response={})",
"def wrap_keystone_exception(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except keystone_exceptions.AuthorizationFailure:\n raise AuthorizationFailure(\n client=func.__name__, message=\"reason: %s\" % sys.exc_info()[1])\n except keystone_exceptions.ClientException:\n raise AuthorizationFailure(\n client=func.__name__,\n message=\"unexpected keystone client error occurred: %s\"\n % sys.exc_info()[1])\n return wrapped",
"def convert_exception(message: str, err: ClientError) -> PytleapError:\n if isinstance(err, ServerTimeoutError):\n return TimeoutError(message, err)\n if isinstance(err, ClientResponseError):\n if err.status in [401, 403]:\n return AuthenticationError(message, err)\n\n return CommunicationError(message)",
"def cli_exceptions():\n try:\n yield\n except (RepoException, ServiceException) as e:\n raise ClickException(e.message)",
"def test_other_botocore_error(self):\n thrown_ex = botocore.exceptions.ClientError(\n {\"Error\": {\"Code\": \"500\"}}, \"S#Download\"\n )\n self._download_error_test(thrown_ex, botocore.exceptions.ClientError)",
"def handle_service_exception(e):\n from azure.cli.core.azclierror import (\n AzureInternalError,\n AzureResponseError,\n BadRequestError,\n ForbiddenError,\n ResourceNotFoundError,\n UnauthorizedError,\n )\n\n err = unpack_msrest_error(e)\n op_status = getattr(e.response, \"status_code\", -1)\n\n # Generic error if the status_code is explicitly None\n if not op_status:\n raise AzureResponseError(err)\n if op_status == 400:\n raise BadRequestError(err)\n if op_status == 401:\n raise UnauthorizedError(err)\n if op_status == 403:\n raise ForbiddenError(err)\n if op_status == 404:\n raise ResourceNotFoundError(err)\n # Any 5xx error should throw an AzureInternalError\n if 500 <= op_status < 600:\n raise AzureInternalError(err)\n # Otherwise, fail with generic service error\n raise AzureResponseError(err)",
"def _log_client_exception(self, e):\n _log.exception('Client-side exception occurred')",
"def from_exception(ex):\n\n return BravehubPlatformException(inner_ex=ex)",
"def normalize_exceptions(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n message = \"Whoa, you found a bug.\"\n try:\n return func(*args, **kwargs)\n except requests.HTTPError as err:\n raise CommError(err.response, err)\n except RetryError as err:\n if \"response\" in dir(err.last_exception) and err.last_exception.response is not None:\n try:\n message = err.last_exception.response.json().get(\n 'errors', [{'message': message}])[0]['message']\n except ValueError:\n message = err.last_exception.response.text\n else:\n message = err.last_exception\n\n if env.is_debug():\n six.reraise(type(err.last_exception), err.last_exception, sys.exc_info()[2])\n else:\n six.reraise(CommError, CommError(\n message, err.last_exception), sys.exc_info()[2])\n except Exception as err:\n # gql raises server errors with dict's as strings...\n if len(err.args) > 0:\n payload = err.args[0]\n else:\n payload = err\n if str(payload).startswith(\"{\"):\n message = ast.literal_eval(str(payload))[\"message\"]\n else:\n message = str(err)\n if env.is_debug():\n six.reraise(*sys.exc_info())\n else:\n six.reraise(CommError, CommError(\n message, err), sys.exc_info()[2])\n\n return wrapper",
"def test_save_update_boto_error(self):\n resp = deepcopy(self.FAKE_ERROR_RESP)\n resp['Error']['Message'] = 'An error that I cannot handle happened'\n self._cf.update_stack.side_effect = ClientError(resp, '')\n\n # GOTCHA: S3 portion of the code is already covered by test_save_create() and test_save_update_success()\n # Skip through that part.\n with patch('krux_cloud_formation.cloud_formation.CloudFormation._is_stack_exists', MagicMock(return_value=True)):\n with self.assertRaises(ClientError):\n self._cfn.save(self.TEST_STACK_NAME)\n self._cf.update_stack.assert_called_once_with(\n StackName=self.TEST_STACK_NAME,\n TemplateURL=self.FAKE_URL\n )",
"async def a_error_handler(response: ClientResponse):\n resp = await response.json()\n if response.status != 200:\n if isinstance(resp, dict):\n if resp.get(\"Error\"):\n raise FindcloneError(f\"error_code: {response.status}\", resp[\"Error\"])\n\n return True",
"def transform_job_exception(e, error=None):\n if isinstance(e, EEServerError):\n return NarrativeException(e.code, e.message, e.name, \"ee2\", error)\n elif isinstance(e, HTTPError):\n code = e.response.status_code\n if code == 404 or code == 502 or code == 503:\n # service not found\n msg = \"A KBase service is currently unavailable.\"\n elif code == 504 or code == 598 or code == 599:\n # service timeout\n msg = \"There was a temporary network connection error.\"\n elif code == 500:\n # internal error. dunno what to do.\n msg = \"An internal error occurred in the KBase service.\"\n else:\n msg = \"An untracked error occurred.\"\n return NarrativeException(\n e.response.status_code, msg, \"HTTPError\", \"network\", error\n )\n else:\n return NarrativeException(-1, str(e), \"Exception\", \"unknown\", error)",
"def test_gcb_list_ioc_failure_response_400(client, mocker):\n from GoogleChronicleBackstory import gcb_list_iocs_command\n\n mocker.patch(RETURN_ERROR_MOCK_PATH, new=return_error)\n\n mock_response = (\n Response(dict(status=400)),\n b'{\"error\": { \"code\": 400, \"message\": \"page not found\", \"status\": \"INVALID_ARGUMENT\" } }'\n )\n\n client.http_client.request.return_value = mock_response\n with pytest.raises(ValueError) as error:\n gcb_list_iocs_command(client, {})\n assert str(error.value) == 'Status code: 400\\nError: page not found'",
"def _catch_http_error_raise_gcs_api_error(format_str=None):\n return cloud_errors.catch_error_raise_cloud_api_error(\n apitools_exceptions.HttpError,\n cloud_errors.GcsApiError,\n format_str=format_str)",
"def _handle_grpc_exception(self, exception):\n if exception._state.code not in self._RETRY_STATUS_CODES:\n trailing_metadata = exception.trailing_metadata()\n google_ads_failure = self._get_google_ads_failure(\n trailing_metadata)\n\n if google_ads_failure:\n request_id = self._get_request_id(trailing_metadata)\n\n raise google.ads.google_ads.errors.GoogleAdsException(\n exception, exception, google_ads_failure, request_id)\n else:\n # Raise the original exception if not a GoogleAdsFailure.\n raise exception\n else:\n # Raise the original exception if error has status code\n # INTERNAL or RESOURCE_EXHAUSTED.\n raise exception",
"def test_with_scmclient_errors_from_get_repository_info(self):\n tempdir = make_tempdir()\n git_dir = os.path.realpath(os.path.join(tempdir, 'git-repo'))\n\n e = Exception('oh no')\n\n execute(['git', 'init', git_dir])\n\n self.spy_on(GitClient.get_repository_info,\n owner=GitClient,\n op=kgb.SpyOpRaise(e))\n\n scan_result = scan_scmclients_for_path(\n path=git_dir,\n scmclient_kwargs={\n 'options': {},\n })\n\n self.assertFalse(scan_result.found)\n self.assertIsNone(scan_result.local_path)\n self.assertIsNone(scan_result.scmclient)\n\n # Check the candidates.\n self.assertEqual(len(scan_result.candidates), 1)\n\n candidate = scan_result.candidates[0]\n self.assertEqual(candidate.local_path, git_dir)\n self.assertIsInstance(candidate.scmclient, GitClient)\n\n # Check the errors.\n self.assertEqual(scan_result.scmclient_errors, {\n 'git': e,\n })",
"def failed_request_exception(message, r):\n try:\n resp = json.loads(r.text)\n message = '%s: %d\\n%s' % (message, resp['error']['code'],\n resp['error']['message'])\n return click.ClickException(message)\n except ValueError:\n # fallback on raw text response if error is not structured.\n return click.ClickException('%s: %d\\n%s' % (message,\n r.status_code,\n r.text))",
"def wrap_pecan_controller_exception(func):\n def _func_server_error(log_correlation_id, status_code):\n pecan.response.status = status_code\n return {\n 'faultcode': 'Server',\n 'status_code': status_code,\n 'title': woutil.status_reasons[status_code],\n 'description': str(OBFUSCATED_MSG % log_correlation_id),\n }\n\n def _func_client_error(excp, status_code):\n pecan.response.status = status_code\n return {\n 'faultcode': 'Client',\n 'faultstring': convert_excp_to_err_code(excp.__class__.__name__),\n 'status_code': status_code,\n 'title': str(excp),\n 'description': str(excp),\n }\n\n return wrap_controller_exception(func,\n _func_server_error,\n _func_client_error)",
"def test_http_request_other_demisto_exception(mock_base_http_request, client):\n # Configure\n mock_base_http_request.side_effect = DemistoException('custom')\n\n # Execute\n with pytest.raises(Exception) as e:\n client.http_request('GET', MOCK_TEST_URL_SUFFIX)\n\n # Assert\n assert str(e.value) == 'custom'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Back up the MESSENGERuvvs database tables. Dump the MESSENGERuvvs data into SQL files that can be restored if needed.
|
def databasebackups():
# Read in current config file if it exists
configfile = os.path.join(os.environ['HOME'], '.nexoclom')
datapath = None
if os.path.isfile(configfile):
for line in open(configfile, 'r').readlines():
key, value = line.split('=')
if key.strip() == 'datapath':
datapath = value.strip()
else:
pass
else:
pass
assert datapath is not None, 'Undefined datapath.'
# Get database name and port
database, port = database_connect(return_con=False)
mestables = ['capointing', 'cauvvsdata', 'caspectra',
'mgpointing', 'mguvvsdata', 'mgspectra',
'napointing', 'nauvvsdata', 'naspectra',
'mesmercyear']
for table in mestables:
print(f'Backing up {table}')
savef = os.path.join(datapath, f'UVVS_{table}.sql')
os.system(f"pg_dump -p {port} -t {table} {database} > {savef}")
|
[
"def backup_database():\n backup_filename = syt.make_project_path(\"/resources/database_backups/\"+syt.add_timestamp_to_filename(db.database))\n syt.log_info(\"Backing up the database\")\n syt.copy_file(db.database, backup_filename)\n syt.log_info(\"Backedup to {}\".format(backup_filename))",
"def perform_full_backup(secrets_env) -> str:\n dev_dump_filename = get_dump_filename('dev')\n prod_dump_filename = get_dump_filename('prod')\n perform_database_dump(dev_dump_filename, prod_dump_filename, secrets_env)\n dbx = get_dropbox_instance(secrets_env)\n dev_db_shared_link = upload_generated_dumps(dbx, dev_dump_filename, prod_dump_filename)\n os.remove(dev_dump_filename)\n os.remove(prod_dump_filename)\n return dev_db_shared_link",
"def dump_database(ctx):\n\n mappings = DataFrame()\n\n for source in ctx.obj['sources']:\n mappings = mappings.append(source.flat_fields, ignore_index=True)\n\n filepath = join(ROOT_DIR, SOURCE_DB)\n mappings.to_excel(filepath, sheet_name='All sourced fields', index=False)\n secho('Dumped the source database to {}'.format(filepath), **SUCCESS)",
"def backup_database(self):\n\t\ttime_string = strftime('%Y%m%d%H%M%S')\n\t\tfile = idc.GetInputFile()\n\t\tif not file:\n\t\t\traise NoInputFileException('No input file provided')\n\t\tinput_file = rsplit(file, '.', 1)[0]\n\t\tbackup_file = \"{:s}_{:s}.idb\".format(input_file, time_string)\n\t\tidc.SaveBase(backup_file, idaapi.DBFL_BAK)",
"def dumpdb(self):\n t = time.strftime('%Y-%m-%d-%H:%M', time.localtime())\n with open('/var/log/enstore/tmp/enstore/%s_%s.mtx_dbdump.out' % (self.name, t,), 'w') as of:\n try:\n of.write(self._listVolumes())\n except:\n pass",
"def restore_database():\n database_backup_menu()",
"def dump_DB(self):\n\t\tprint 'Dump data base....'\n\t\tstream = open(self.DB_file, 'w')\n\t\tpickle.dump(self.DB, stream)\n\t\tstream.close()\n\t\t#return ",
"def dbdev_restore_and_migrate(backupdirectory):\n _manage('dbdev_restore {}'.format(backupdirectory))\n migrate()",
"def backup():\n\n # assume your AWS access key and secret key is present in ~/.boto\n REGION = 'us-east-1'\n SRCTABLE = '*' # all tables\n LOG = 'DEBUG' # DEBUG|INFO|WARNING|ERROR|CRITICAL\n local('./dynamodump/dynamodump.py -m backup -r {REGION} -s \"{SRCTABLE}\" --log {LOG}'.format(**locals()))",
"def export_db():\n dump_name_zip = download_db()[0] # download_db returns a list\n dump_name = splitext(dump_name_zip)[0] # name without zip extension\n\n # env.host replaced with staging host\n with settings(host_string=env.config.staging_DB.host):\n # upload the zipped file\n dump_name_zip = put(dump_name_zip)[0] # put returns a list\n dump_name = splitext(dump_name_zip)[0] # name without zip extension\n\n # gunzip dump\n run('gunzip \"%s\"' % dump_name_zip)\n\n run('echo \"drop database if exists %s\" | psql' %\n env.config.staging_DB.name)\n run('echo \"create database %s\" | psql' % env.config.staging_DB.name)\n run('psql %s < \"%s\"' % (env.config.staging_DB.name, dump_name))\n env.config.save()\n\n # cleanup files\n run('rm -f \"%s\"' % dump_name) # raw file",
"def restore():\n backup_dir = f'{args.database}/backups/{datetime.strftime(backups[args.restore - 1], \"%d-%b-%Y_%H-%M-%S\")}'\n\n if os.path.isdir(f'{args.database}/orthologs'):\n shutil.rmtree(f'{args.database}/orthologs')\n shutil.copytree(f'{backup_dir}/orthologs', f'{args.database}/orthologs')\n\n if os.path.isdir(f'{args.database}/paralogs'):\n shutil.rmtree(f'{args.database}/paralogs')\n shutil.copytree(f'{backup_dir}/paralogs', f'{args.database}/paralogs')\n\n if os.path.isfile(f'{args.database}/metadata.tsv'):\n os.remove(f'{args.database}/metadata.tsv')\n shutil.copy(f'{backup_dir}/metadata.tsv', f'{args.database}/metadata.tsv')\n\n if os.path.isfile(f'{args.database}/tree_colors.tsv'):\n os.remove(f'{args.database}/tree_colors.tsv')\n shutil.copy(f'{backup_dir}/tree_colors.tsv', f'{args.database}/tree_colors.tsv')\n\n if os.path.isdir(f'{args.database}/proteomes'):\n shutil.rmtree(f'{args.database}/proteomes')\n shutil.copytree(f'{backup_dir}/proteomes', f'{args.database}/proteomes')",
"def database_backup():\n try:\n # backup the current ARM DB\n log.info(\"Backing up the current ARM DB\")\n currentime = datetime.datetime.now()\n filename = f\"arm_{currentime.year}-{currentime.month}-{currentime.day}_{currentime.hour}{currentime.minute}.db\"\n os.system(f\"mv {path_db}{file_db} {path_db}{filename}\")\n log.success(f\"current ARM DB saved {path_db}{filename}\")\n except Exception as error:\n log.error(\"Something has gone wrong, unable backup the database\")\n log.error(f\" - {error}\")",
"def backupDatabase(self):\n \n if self.ini['BACKUP LOCATION']:\n fout = open(self.ini['BACKUP LOCATION'] + \"journal_db\", \"wb\")\n else:\n self.changeBackupDirectory()\n fout = open(self.ini['BACKUP LOCATION'] + \"journal_db\", \"wb\")\n pickle.dump(self.journal, fout)\n fout.close()\n date = DateTools.getCurrentDate()\n self.ini['LAST BACKUP'] = date\n self.last_backup.set(DateTools.getDateGUIFormat(date))",
"def write_bkup_database(self):\n backup_file = open(\"database/\"+self.database_name+\"_bk.txt\",'w',encoding=\"utf8\")\n backup_file.write(json.dumps(self.data_list))\n backup_file.close()",
"def backup_databases(full_backup, path):\n\n # Look for given PATH. If it doesn't exist, create it.\n if path == '.':\n path = os.getcwd()\n path = str(Path(path))\n\n if not os.path.exists(path):\n os.mkdir(path)\n\n # Inspect FULL_BACKUP, generate SQL query accordingly.\n if full_backup:\n sql = \"SELECT * FROM SYS.DATABASES WHERE NAME NOT IN ('master','model','msdb','tempdb')\"\n else:\n exit_message = \"Script exited because only total backups have been implemented. Re-use command with option \" \\\n \"--full-backup True \"\n click.echo(exit_message)\n sys.exit()\n\n # Get the list of databases to backup.\n database_probe = DatabaseProbe()\n result_set = database_probe.execute_query(sql)\n\n # For each database, attempt to back it up. Log success / errors / failures accordingly.\n for database in result_set:\n try:\n sql = f\"BACKUP DATABASE \\\"{database}\\\" TO DISK = \\'{path}\\\\{database}.BAK\\' WITH INIT\"\n click.echo(f\"Executing: {sql}\")\n database_probe = DatabaseProbe() # Create a new probe for each backup execution. We have to do this\n # because the queries get pushed to SQL SERVER for execution. The connector object may not (very likely\n # isn't) closed before the next query is pushed.\n database_probe.execute_query(sql)\n click.secho(f\"SUCCESS: {database} BACKUP COMPLETE\", bold=True, fg=\"green\")\n except Exception as e:\n click.secho(f\"ERROR during {database} BACKUP attempt. {e}\", bold=True, fg=\"red\")\n finally:\n database_probe.dispose()",
"def backup_globals(self):\n\n logging.info(\"Start backup globals.\")\n\n cmd = [self.pg_dumpall, \"-g\", \"-h\", self.pg_host, \"-p\", self.pg_port, \"-U\", self.pg_user]\n\n if self.comp == \"gzip\":\n cmd.extend([\"|\", self.comp_path, \"-c\", \"-{0}\".format(self.level), \">\", \"{0}/globals.sql.gz\".format(self.output)])\n elif self.comp == \"7z\" or self.comp == \"7za\":\n cmd.extend([\"|\", self.comp_path, \"a\", \"-si\", \"-mx={0}\".format(self.level), \"{0}/globals.sql.7z\".format(self.output)])\n elif self.comp == \"xz\" or self.comp == \"lzma\":\n cmd.extend([\"|\", self.comp_path, \"-zfc\", \"-{0}\".format(self.level), \">\", \"{0}/globals.sql.xz\".format(self.output)])\n\n proc = subprocess.Popen(' '.join(cmd), env={\"PGPASSWORD\":self.postgres_password},\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True)\n\n out, err = proc.communicate()\n rc = proc.returncode\n\n if rc == 0:\n logging.info(\"OK backup globals.\")\n return True\n else:\n raise Exception(err)\n return",
"def backup(folder=\".\", archive=None):\n # get database name (ie. hub_db internal database)\n db_name = get_src_dump().database.name\n dump = {}\n for getter in [\n get_src_dump,\n get_src_master,\n get_src_build,\n get_src_build_config,\n get_data_plugin,\n get_api,\n get_cmd,\n get_event,\n get_hub_config,\n ]:\n col = getter()\n dump[col.name] = []\n for doc in col.find():\n dump[col.name].append(doc)\n if not archive:\n archive = \"%s_backup_%s_%s.pyobj\" % (db_name, get_timestamp(), get_random_string())\n path = os.path.join(folder, archive)\n dumpobj(dump, path)\n return path",
"def dump_database_to_file():\r\n collection = get_collection_to_backup()\r\n\r\n # get the database and collection name to backup\r\n name = collection.full_name\r\n split = name.split('.', 1)\r\n db_name = split[0]\r\n collection_name = split[1]\r\n\r\n # the path to backup to\r\n path = os.path.abspath(os.path.join(os.path.curdir, name))\r\n\r\n try:\r\n # execute mongodump and backup the database to external directory\r\n backup_output = subprocess.check_output(\r\n [\r\n MONGODUMP_PATH + '\\mongodump',\r\n '--db', '%s' % db_name,\r\n '--collection', '%s' % collection_name,\r\n '--out', '%s' % path\r\n ])\r\n\r\n # delete the collection after it is backed up\r\n #delete_collection(collection)\r\n\r\n return path\r\n except Exception, err:\r\n print err\r\n return None",
"def reset_database():\n if os.path.exists(testinit.database_file):\n os.remove(testinit.database_file)\n shutil.copy(testinit.clean_db, testinit.database_file)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get Kerberos details in a cluster.
|
def cluster_kerberos_info(self, cluster_name):
try:
r = None
if self.version == 7:
r = requests.get(
"{}://{}:{}/api/v40/clusters/{}/kerberosInfo".format(
self.http,
self.cloudera_manager_host_ip,
self.cloudera_manager_port,
cluster_name,
),
auth=HTTPBasicAuth(
self.cloudera_manager_username, self.cloudera_manager_password
),
verify=False,
)
elif self.version == 6:
r = requests.get(
"{}://{}:{}/api/v30/clusters/{}/kerberosInfo".format(
self.http,
self.cloudera_manager_host_ip,
self.cloudera_manager_port,
cluster_name,
),
auth=HTTPBasicAuth(
self.cloudera_manager_username, self.cloudera_manager_password
),
verify=False,
)
elif self.version == 5:
r = requests.get(
"{}://{}:{}/api/v18/clusters/{}/kerberosInfo".format(
self.http,
self.cloudera_manager_host_ip,
self.cloudera_manager_port,
cluster_name,
),
auth=HTTPBasicAuth(
self.cloudera_manager_username, self.cloudera_manager_password
),
verify=False,
)
else:
self.logger.error("cluster_kerberos_info as cloudera does not exist")
r = None
if r.status_code == 200:
cluster_kerberos_info = r.json()
kerberized_status = str(cluster_kerberos_info["kerberized"])
if kerberized_status == "True":
cluster_kerberos_info = "Cluster is kerberized"
else:
cluster_kerberos_info = "Cluster is not kerberized"
self.logger.info("cluster_kerberos_info successful")
return cluster_kerberos_info
else:
self.logger.error(
"cluster_kerberos_info failed due to invalid API call. HTTP Response: "
+ str(r.status_code)
)
return None
except Exception as e:
self.logger.error("cluster_kerberos_info failed", exc_info=True)
return None
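
# Illustrative usage sketch (an assumption, not part of the original snippet):
# cluster_kerberos_info is an instance method, so a caller with a configured
# Cloudera Manager API wrapper can collect the status string per cluster.
def _example_report_kerberos(api_wrapper, cluster_names):  # hypothetical helper
    # None entries mean the API call failed and was already logged by the method.
    return {name: api_wrapper.cluster_kerberos_info(name) for name in cluster_names}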
|
[
"def kerberos_http_auth(self):\n\n try:\n r = None\n if self.version == 7:\n r = requests.get(\n \"{}://{}:{}/api/v40/cm/kerberosPrincipals\".format(\n self.http,\n self.cloudera_manager_host_ip,\n self.cloudera_manager_port,\n ),\n auth=HTTPBasicAuth(\n self.cloudera_manager_username, self.cloudera_manager_password\n ),\n verify=False,\n )\n elif self.version == 6:\n r = requests.get(\n \"{}://{}:{}/api/v30/cm/kerberosPrincipals\".format(\n self.http,\n self.cloudera_manager_host_ip,\n self.cloudera_manager_port,\n ),\n auth=HTTPBasicAuth(\n self.cloudera_manager_username, self.cloudera_manager_password\n ),\n verify=False,\n )\n elif self.version == 5:\n r = requests.get(\n \"{}://{}:{}/api/v18/cm/kerberosPrincipals\".format(\n self.http,\n self.cloudera_manager_host_ip,\n self.cloudera_manager_port,\n ),\n auth=HTTPBasicAuth(\n self.cloudera_manager_username, self.cloudera_manager_password\n ),\n verify=False,\n )\n else:\n self.logger.error(\"kerberos_http_auth as cloudera does not exist\")\n r = None\n if r.status_code == 200:\n keytab1 = r.json()\n if len(keytab1[\"items\"]) > 0:\n keytab = \"keytab exist\"\n else:\n keytab = \"keytab not exist\"\n keytab1 = keytab1[\"items\"]\n new_list = []\n for i in range(0, len(keytab1)):\n dt = keytab1[i].split(\"/\", 1)\n neww_list = new_list.append(dt[0])\n new_list = [x.lower() for x in new_list]\n\n if \"hue\" in new_list:\n hue_flag = \"Kerberos on hue is enabled\"\n else:\n hue_flag = \"Kerberos on hue is not enabled\"\n\n if \"yarn\" in new_list:\n yarn_flag = \"Kerberos on yarn is enabled\"\n else:\n yarn_flag = \"Kerberos on yarn is not enabled\"\n\n if \"mapred\" in new_list:\n mapred_flag = \"Kerberos on mapreduce is enabled\"\n else:\n mapred_flag = \"Kerberos on mapreduce is not enabled\"\n\n if \"hdfs\" in new_list:\n hdfs_flag = \"Kerberos on HDFS is enabled\"\n else:\n hdfs_flag = \"Kerberos on HDFS is not enabled\"\n\n self.logger.info(\"kerberos_http_auth successful\")\n return hue_flag, mapred_flag, hdfs_flag, yarn_flag, keytab\n else:\n self.logger.error(\n \"kerberos_http_auth failed due to invalid API call. HTTP Response: \"\n + str(r.status_code)\n )\n return None\n except Exception as e:\n self.logger.error(\"kerberos_http_auth failed\", exc_info=True)\n return None",
"def aks_connect(rg, cluster):\n az(\"aks get-credentials --resource-group {} --name {}\".format(rg, cluster))",
"def try_configuration(self) -> None:\n with self.context():\n kerberos.getServerPrincipalDetails(self.service, self.hostname)",
"def cluster_information(self):\n path = \"/ws/v1/cluster/info\"\n return self.request(path)",
"def get_cluster_info(self):\n response = requests.get(self.url)\n response.raise_for_status()\n return response.json()",
"def cli_cosmosdb_mongocluster_get(client,\r\n resource_group_name, cluster_name):\r\n\r\n return client.get(resource_group_name, cluster_name)",
"def get_kerberos_servers(self, ad=None):\n if ad is None:\n ad = self.middleware.call_sync('activedirectory.config')\n AD_DNS = ActiveDirectory_DNS(conf=ad, logger=self.logger)\n krb_kdc = AD_DNS.get_n_working_servers(SRV['KERBEROSDOMAINCONTROLLER'], 3)\n krb_admin_server = AD_DNS.get_n_working_servers(SRV['KERBEROS'], 3)\n krb_kpasswd_server = AD_DNS.get_n_working_servers(SRV['KPASSWD'], 3)\n kdc = [i['host'] for i in krb_kdc]\n admin_server = [i['host'] for i in krb_admin_server]\n kpasswd = [i['host'] for i in krb_kpasswd_server]\n for servers in [kdc, admin_server, kpasswd]:\n if len(servers) == 1:\n return None\n\n return {\n 'krb_kdc': ' '.join(kdc),\n 'krb_admin_server': ' '.join(admin_server),\n 'krb_kpasswd_server': ' '.join(kpasswd)\n }",
"def get_kerberos_k8s_config(\n secrets_store: REANAUserSecretsStore, kubernetes_uid: int\n) -> KerberosConfig:\n secrets_volume_mount = secrets_store.get_secrets_volume_mount_as_k8s_spec()\n keytab_file = secrets_store.get_secret_value(\"CERN_KEYTAB\")\n cern_user = secrets_store.get_secret_value(\"CERN_USER\")\n\n if not keytab_file:\n raise REANASecretDoesNotExist(missing_secrets_list=[\"CERN_KEYTAB\"])\n if not cern_user:\n raise REANASecretDoesNotExist(missing_secrets_list=[\"CERN_USER\"])\n\n ticket_cache_volume = {\n \"name\": \"krb5-cache\",\n \"emptyDir\": {},\n }\n krb5_config_volume = {\n \"name\": \"krb5-conf\",\n \"configMap\": {\"name\": KRB5_CONFIGMAP_NAME},\n }\n volumes = [ticket_cache_volume, krb5_config_volume]\n\n volume_mounts = [\n {\n \"name\": ticket_cache_volume[\"name\"],\n \"mountPath\": KRB5_TOKEN_CACHE_LOCATION,\n },\n {\n \"name\": krb5_config_volume[\"name\"],\n \"mountPath\": \"/etc/krb5.conf\",\n \"subPath\": \"krb5.conf\",\n },\n ]\n\n env = [\n {\n \"name\": \"KRB5CCNAME\",\n \"value\": os.path.join(\n KRB5_TOKEN_CACHE_LOCATION,\n KRB5_TOKEN_CACHE_FILENAME.format(kubernetes_uid),\n ),\n }\n ]\n\n # Kerberos init container generates ticket to access external services\n krb5_init_container = {\n \"image\": KRB5_CONTAINER_IMAGE,\n \"command\": [\n \"kinit\",\n \"-kt\",\n f\"/etc/reana/secrets/{keytab_file}\",\n f\"{cern_user}@CERN.CH\",\n ],\n \"name\": KRB5_INIT_CONTAINER_NAME,\n \"imagePullPolicy\": \"IfNotPresent\",\n \"volumeMounts\": [secrets_volume_mount] + volume_mounts,\n \"env\": env,\n \"securityContext\": {\"runAsUser\": kubernetes_uid},\n }\n\n # Kerberos renew container renews ticket periodically for long-running jobs\n krb5_renew_container = {\n \"image\": KRB5_CONTAINER_IMAGE,\n \"command\": [\"bash\", \"-c\"],\n \"args\": [\n (\n \"SECONDS=0; \"\n f\"while ! test -f {KRB5_STATUS_FILE_LOCATION}; do \"\n f\"if [ $SECONDS -ge {KRB5_TICKET_RENEW_INTERVAL} ]; then \"\n 'echo \"Renewing Kerberos ticket: $(date)\"; kinit -R; SECONDS=0; fi; '\n f\"sleep {KRB5_STATUS_FILE_CHECK_INTERVAL}; done\"\n )\n ],\n \"name\": KRB5_RENEW_CONTAINER_NAME,\n \"imagePullPolicy\": \"IfNotPresent\",\n \"volumeMounts\": [secrets_volume_mount] + volume_mounts,\n \"env\": env,\n \"securityContext\": {\"runAsUser\": kubernetes_uid},\n }\n\n return KerberosConfig(\n volumes, volume_mounts, env, krb5_init_container, krb5_renew_container\n )",
"def get_cluster():\n return nifi.ControllerApi().get_cluster()",
"def get_clusters(self) -> List[Dict]:\n\n \"\"\"\n GET /v1/clusters HTTP/1.1\n Host: containers.bluemix.net\n Accept: application/json\n Authorization: [PRIVATE DATA HIDDEN]\n Content-Type: application/json\n X-Region: au-syd\n \"\"\"\n # returns 200 OK on success\n\n resp = self.session.get(\n \"{0}/v1/clusters\".format(self.endpoint_url),\n headers={\"X-Region\": self.region, \"Accept\": \"application/json\"},\n )\n\n if resp.status_code != 200:\n raise Exception(\n \"error getting clusters: code=%d body=%r\"\n % (resp.status_code, resp.text)\n )\n\n return resp.json()",
"def do_kube_cluster_list(cc, args):\n versions = cc.kube_cluster.list()\n fields = ['cluster_name', 'cluster_version', 'cluster_api_endpoint']\n labels = fields\n utils.print_list(versions, fields, labels, sortby=0)",
"def test_keystone_kerberos_authentication(self):\n logging.info('Retrieving a kerberos token with kinit for admin user')\n\n ubuntu_test_host = zaza.model.get_units('ubuntu-test-host')[0]\n result = zaza.model.run_on_unit(ubuntu_test_host.name,\n \"echo password123 | kinit admin\")\n assert result['Code'] == '0', result['Stderr']\n\n logging.info('Changing token mod for user access')\n result = zaza.model.run_on_unit(\n ubuntu_test_host.name,\n \"sudo install -m 777 /tmp/krb5cc_0 /tmp/krb5cc_1000\"\n )\n assert result['Code'] == '0', result['Stderr']\n\n logging.info('Fetching user/project info in OpenStack')\n domain_name = 'k8s'\n project_name = 'k8s'\n keystone_session = openstack_utils.get_overcloud_keystone_session()\n keystone_client = openstack_utils.get_keystone_session_client(\n keystone_session)\n domain_id = keystone_client.domains.find(name=domain_name).id\n project_id = keystone_client.projects.find(name=project_name).id\n keystone_hostname = get_unit_full_hostname('keystone')\n\n logging.info('Retrieving an OpenStack token to validate auth')\n cmd = 'openstack token issue -f value -c id ' \\\n '--os-auth-url http://{}:5000/krb/v3 ' \\\n '--os-project-id {} ' \\\n '--os-project-name {} ' \\\n '--os-project-domain-id {} ' \\\n '--os-region-name RegionOne ' \\\n '--os-interface public ' \\\n '--os-identity-api-version 3 ' \\\n '--os-auth-type v3kerberos'.format(keystone_hostname,\n project_id,\n project_name,\n domain_id)\n\n result = zaza.model.run_on_unit(ubuntu_test_host.name, cmd)\n assert result['Code'] == '0', result['Stderr']",
"def get_cluster_names():\n token = get_session()\n headers = {\n 'Cookie': 'argocd.token={}'.format(token)\n }\n r = requests.get('https://build.osinfra.cn/api/v1/clusters', headers=headers)\n if r.status_code != 200:\n print('Cannot get cluster names because GET request failed.')\n print(r.status_code, r.json())\n sys.exit(1)\n cluster_names = []\n for i in r.json()['items']:\n cluster_names.append(i['name'])\n return cluster_names",
"def test_clusters_cluster_name_get(self):\n MockAmbari = Ambari\n Ambari.get_cluster_info = Mock(return_value={'cluster_name': 'cluster_name'})\n response = self.client.open(\n '/detapi/{version}/clusters/{cluster_name}'.format(version=__version__, \n cluster_name='cluster_name_example'),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def __get_token_gss(self):\n if not EXTRA_MODULES['requests_kerberos']:\n raise MissingModuleException('The requests-kerberos module is not installed.')\n\n url = build_url(self.auth_host, path='auth/gss')\n\n result = self._send_request(url, get_token=True, auth=HTTPKerberosAuth())\n\n if not result:\n self.logger.error('Cannot retrieve authentication token!')\n return False\n\n if result.status_code != codes.ok: # pylint: disable-msg=E1101\n exc_cls, exc_msg = self._get_exception(headers=result.headers,\n status_code=result.status_code,\n data=result.content)\n raise exc_cls(exc_msg)\n\n self.auth_token = result.headers['x-rucio-auth-token']\n return True",
"def retrieve_cluster_ca(self):\n url = f'{self.hostname}/pools/default/trustedCAs'\n return self._get(url)",
"def do_kube_cluster_show(cc, args):\n try:\n name = cc.kube_cluster.get(args.name)\n _print_kube_cluster_show(name)\n except exc.HTTPNotFound:\n raise exc.CommandError('kubernetes cluster not found: %s' % args.name)",
"def list_cluster_names(self):\n info = self._make_service_info()\n try:\n return sorted(self.huskar_client.client.get_children(info.path))\n except NoNodeError:\n return []",
"def get_cluster_props(redshift):\n print(\"- Waiting for the cluster to be available ...\")\n global DWH_CLUSTER_IDENTIFIER\n myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]\n # Busy wait until the cluster is created\n while myClusterProps[\"ClusterStatus\"] == \"creating\":\n time.sleep(30) # Sleep 30 sec\n myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]\n print(\"- Cluster is now available\")\n return myClusterProps"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get SSL status of various services.
|
def ssl_status(self):
try:
path_status = path.exists("{}".format(self.config_path["hdfs"]))
            if path_status:
xml_data = subprocess.Popen(
"cat {} | grep HTTPS_ONLY".format(self.config_path["hdfs"]),
shell=True,
stdout=subprocess.PIPE,
encoding="utf-8",
)
xml_data.wait(10)
out, err = xml_data.communicate()
if out.find("HTTPS_ONLY") == -1:
hdfs_ssl = "SSL on HDFS is not enabled"
else:
hdfs_ssl = "SSL on HDFS is enabled"
else:
hdfs_ssl = None
path_status = path.exists("{}".format(self.config_path["yarn"]))
            if path_status:
xml_data = subprocess.Popen(
"cat {} | grep HTTPS_ONLY".format(self.config_path["yarn"]),
shell=True,
stdout=subprocess.PIPE,
encoding="utf-8",
)
xml_data.wait(10)
out, err = xml_data.communicate()
if out.find("HTTPS_ONLY") == -1:
yarn_ssl = "SSL on Yarn is not enabled"
else:
yarn_ssl = "SSL on Yarn is enabled"
else:
yarn_ssl = None
path_status = path.exists("{}".format(self.config_path["mapred"]))
            if path_status:
xml_data = subprocess.Popen(
"cat {} | grep HTTPS_ONLY".format(self.config_path["mapred"]),
shell=True,
stdout=subprocess.PIPE,
encoding="utf-8",
)
xml_data.wait(10)
out, err = xml_data.communicate()
if out.find("HTTPS_ONLY") == -1:
Mr_ssl = "SSL on Mapreduce is not enabled"
else:
Mr_ssl = "SSL on Mapreduce is enabled"
else:
Mr_ssl = None
self.logger.info("ssl_status successful")
return Mr_ssl, hdfs_ssl, yarn_ssl
except Exception as e:
self.logger.error("ssl_status failed", exc_info=True)
return None
|
[
"def ssl(self):\r\n return self.sslobj",
"def get_ssl_certs():\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide an AWS conifguration based on aws.cfg-dist to proceed. %s\" % error))\n return 1\n\n iam = connect_to_iam()\n certs = iam.get_all_server_certs()['list_server_certificates_response']['list_server_certificates_result']['server_certificate_metadata_list']\n for cert in certs:\n print cert['server_certificate_name']\n return certs",
"def get_ssl_options(self) -> Dict[str, Any]:\n return AsyncHTTPSTestCase.default_ssl_options()",
"def get(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving the SSL certificate cluster replication status\",\n \"/isam/ssl_certificates/?cluster=true\", requires_model=requires_model)",
"def status(logger, client):\n logger.info(client.manager.ssl_status())",
"def get(cls, service, name=\"\", option_=\"\") :\n try :\n obj = ssl_stats()\n if not name :\n response = obj.stat_resources(service, option_)\n return response\n except Exception as e:\n raise e",
"def is_ssl(self):\n\t\treturn self.ssl",
"def sslenginestatus(self) :\n try :\n return self._sslenginestatus\n except Exception as e:\n raise e",
"def HasSSL(self):\n return self.__has('SSL')",
"def enable_ssl(self):\n # type: () -> bool\n return self._get_property('enable_ssl')",
"def ssl():\n pass",
"def san_certs(self):\n return self.get('san_certs')",
"def get_ssl_ca_certs(self):\n return self._ssl_ca_certs",
"def ssl(self):\n\t\tif 'with_openssl' in self.configure_options:\n\t\t\treturn True\n\t\t# Parameterized form in newer versions.\n\t\tfor x in self.configure_options:\n\t\t\tif 'with_ssl' in x:\n\t\t\t\treturn True\n\t\treturn False",
"def _get_an_active_slaver(self):\n # 这个是什么意思?\n try_count = 10\n while not self._stopped['stop']:\n try:\n logging.info(\"master _get_an_active_slaver self.slaver_pool:{},{}\".format(id(self.slaver_pool), self.slaver_pool))\n dict_slaver = self.slaver_pool.popleft()\n except:\n if try_count:\n time.sleep(0.02)\n try_count -= 1\n if try_count % 10 == 0:\n log.error(\"!!NO SLAVER AVAILABLE!! trying {}\".format(try_count))\n continue\n return None\n\n conn_slaver = dict_slaver[\"conn_slaver\"]\n\n try:\n hs = self._handshake(conn_slaver)\n except Exception as e:\n log.warning(\"Handshake failed: {},key:{},{},{},{}\".format(e,id(self),self.pkg.skey,self.pkg.SECRET_KEY_CRC32,self.pkg.SECRET_KEY_REVERSED_CRC32))\n log.debug(traceback.format_exc())\n hs = False\n\n if hs:\n return conn_slaver\n else:\n log.warning(\"slaver handshake failed: {}\".format(dict_slaver[\"addr_slaver\"]))\n try_close(conn_slaver)\n\n time.sleep(0.02)",
"def sslbetottlsv1sessions(self) :\n try :\n return self._sslbetottlsv1sessions\n except Exception as e:\n raise e",
"def sslservice_sslcertkey_bindings(self) :\n\t\ttry :\n\t\t\treturn self._sslservice_sslcertkey_binding\n\t\texcept Exception as e:\n\t\t\traise e",
"def sslservice_sslcipher_bindings(self) :\n\t\ttry :\n\t\t\treturn self._sslservice_sslcipher_binding\n\t\texcept Exception as e:\n\t\t\traise e",
"def sslservice_sslciphersuite_bindings(self) :\n\t\ttry :\n\t\t\treturn self._sslservice_sslciphersuite_binding\n\t\texcept Exception as e:\n\t\t\traise e"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get Kerberos status of various services.
|
def kerberos_http_auth(self):
try:
r = None
if self.version == 7:
r = requests.get(
"{}://{}:{}/api/v40/cm/kerberosPrincipals".format(
self.http,
self.cloudera_manager_host_ip,
self.cloudera_manager_port,
),
auth=HTTPBasicAuth(
self.cloudera_manager_username, self.cloudera_manager_password
),
verify=False,
)
elif self.version == 6:
r = requests.get(
"{}://{}:{}/api/v30/cm/kerberosPrincipals".format(
self.http,
self.cloudera_manager_host_ip,
self.cloudera_manager_port,
),
auth=HTTPBasicAuth(
self.cloudera_manager_username, self.cloudera_manager_password
),
verify=False,
)
elif self.version == 5:
r = requests.get(
"{}://{}:{}/api/v18/cm/kerberosPrincipals".format(
self.http,
self.cloudera_manager_host_ip,
self.cloudera_manager_port,
),
auth=HTTPBasicAuth(
self.cloudera_manager_username, self.cloudera_manager_password
),
verify=False,
)
else:
self.logger.error("kerberos_http_auth as cloudera does not exist")
r = None
if r.status_code == 200:
keytab1 = r.json()
if len(keytab1["items"]) > 0:
keytab = "keytab exist"
else:
keytab = "keytab not exist"
keytab1 = keytab1["items"]
new_list = []
for i in range(0, len(keytab1)):
dt = keytab1[i].split("/", 1)
                    new_list.append(dt[0])
new_list = [x.lower() for x in new_list]
if "hue" in new_list:
hue_flag = "Kerberos on hue is enabled"
else:
hue_flag = "Kerberos on hue is not enabled"
if "yarn" in new_list:
yarn_flag = "Kerberos on yarn is enabled"
else:
yarn_flag = "Kerberos on yarn is not enabled"
if "mapred" in new_list:
mapred_flag = "Kerberos on mapreduce is enabled"
else:
mapred_flag = "Kerberos on mapreduce is not enabled"
if "hdfs" in new_list:
hdfs_flag = "Kerberos on HDFS is enabled"
else:
hdfs_flag = "Kerberos on HDFS is not enabled"
self.logger.info("kerberos_http_auth successful")
return hue_flag, mapred_flag, hdfs_flag, yarn_flag, keytab
else:
self.logger.error(
"kerberos_http_auth failed due to invalid API call. HTTP Response: "
+ str(r.status_code)
)
return None
except Exception as e:
self.logger.error("kerberos_http_auth failed", exc_info=True)
return None
|
[
"def smb_service_status(mnode):\n g.log.info(\"Getting SMB Service status on %s\", mnode)\n return g.run(mnode, \"service smb status\")",
"def get_kerberos_servers(self, ad=None):\n if ad is None:\n ad = self.middleware.call_sync('activedirectory.config')\n AD_DNS = ActiveDirectory_DNS(conf=ad, logger=self.logger)\n krb_kdc = AD_DNS.get_n_working_servers(SRV['KERBEROSDOMAINCONTROLLER'], 3)\n krb_admin_server = AD_DNS.get_n_working_servers(SRV['KERBEROS'], 3)\n krb_kpasswd_server = AD_DNS.get_n_working_servers(SRV['KPASSWD'], 3)\n kdc = [i['host'] for i in krb_kdc]\n admin_server = [i['host'] for i in krb_admin_server]\n kpasswd = [i['host'] for i in krb_kpasswd_server]\n for servers in [kdc, admin_server, kpasswd]:\n if len(servers) == 1:\n return None\n\n return {\n 'krb_kdc': ' '.join(kdc),\n 'krb_admin_server': ' '.join(admin_server),\n 'krb_kpasswd_server': ' '.join(kpasswd)\n }",
"def status():\n lines = os.popen(\"ps ef | grep mlcomp\").readlines()\n pids = {}\n for line in lines:\n if \"mlcomp/configs/supervisord.conf\" in line:\n pids[\"server\"] = line\n elif \"mlcomp-server start-site\" in line:\n pids[\"site\"] = line\n elif \"redis-server\" in line:\n pids[\"redis\"] = line\n if not pids:\n print(\"There are no mlcomp services started\")\n return\n text = \"Current MLComp services status:\\n\"\n for k, v in pids.items():\n text += f\" (✔) {k} is started on pid {v.split()[0]}\\n\"\n print(text)",
"def configure_services(cluster):\n services = cluster.get_all_services()\n\n for service in services:\n service_type = service.type\n if service_type == 'HDFS':\n print \"Configuring HDFS for Kerberos.\"\n service.update_config(\n {'hadoop_security_authentication': 'kerberos',\n 'hadoop_security_authorization': 'true'}\n )\n\n role_cfgs = service.get_all_role_config_groups()\n\n for role_cfg in role_cfgs:\n if role_cfg.roleType == 'DATANODE':\n role_cfg.update_config(\n {'dfs_datanode_port': '1004',\n 'dfs_datanode_http_port': '1006',\n 'dfs_datanode_data_dir_perm': '700'}\n )\n elif service_type == 'HBASE':\n print \"Configuring HBase for Kerberos.\"\n service.update_config(\n {'hbase_security_authentication': 'kerberos',\n 'hbase_security_authorization': 'true'}\n )\n elif service_type == 'ZOOKEEPER':\n print \"Configuring ZooKeeper for Kerberos.\"\n service.update_config(\n {'enableSecurity': 'true'}\n )\n elif service_type == 'SOLR':\n print \"Configuring Solr for Kerberos.\"\n service.update_config(\n {'solr_security_authentication': 'kerberos'}\n )\n elif service_type == 'KS_INDEXER':\n # API version 10 came out with CM 5.4, which is necessary to make this configuration\n # change.\n if API_CURRENT_VERSION >= 10:\n print \"Configuring KeyStoreIndexer for Kerberos.\"\n service.update_config(\n {'hbase_indexer_security_authentication': 'kerberos'}\n )\n elif service_type == 'HUE':\n kt_renewer_role = service.get_roles_by_type('KT_RENEWER')\n hue_server_role = service.get_roles_by_type('HUE_SERVER')\n\n if hue_server_role and not kt_renewer_role:\n print \"Configuring Hue for Kerberos.\"\n service.create_role('KT_RENEWER-1', 'KT_RENEWER',\n hue_server_role[0].hostRef.hostId)",
"def get_cluster_status(self):\n status = {}\n groups = {}\n for line in self.run(\"/opt/VRTS/bin/hastatus -sum\", filter='^\\w.*'):\n parts = line.split()\n # 'A' lines are the systems. Output fields are: \"A\" System State Frozen\n if parts[0] == 'A':\n status[parts[1]] = {'state': parts[2], 'frozen': parts[3] != '0'}\n # 'B' lines are the group states. Output fields are: \"B\" Group System Probed AutoDisabled State\n elif parts[0] == 'B':\n #status[parts[2]]['groups'].append({'name': parts[1], 'probed': parts[3] == 'Y', 'autodisabled': parts[4] == 'Y', 'state': parts[5]})\n status[parts[2]][parts[1]] = {'probed': parts[3] == 'Y', 'autodisabled': parts[4] == 'Y', 'state': parts[5]}\n groups[parts[1]] = ''\n # update the group list. easier this way\n self.groups = groups.keys()\n return status",
"def cluster_kerberos_info(self, cluster_name):\n\n try:\n r = None\n if self.version == 7:\n r = requests.get(\n \"{}://{}:{}/api/v40/clusters/{}/kerberosInfo\".format(\n self.http,\n self.cloudera_manager_host_ip,\n self.cloudera_manager_port,\n cluster_name,\n ),\n auth=HTTPBasicAuth(\n self.cloudera_manager_username, self.cloudera_manager_password\n ),\n verify=False,\n )\n elif self.version == 6:\n r = requests.get(\n \"{}://{}:{}/api/v30/clusters/{}/kerberosInfo\".format(\n self.http,\n self.cloudera_manager_host_ip,\n self.cloudera_manager_port,\n cluster_name,\n ),\n auth=HTTPBasicAuth(\n self.cloudera_manager_username, self.cloudera_manager_password\n ),\n verify=False,\n )\n elif self.version == 5:\n r = requests.get(\n \"{}://{}:{}/api/v18/clusters/{}/kerberosInfo\".format(\n self.http,\n self.cloudera_manager_host_ip,\n self.cloudera_manager_port,\n cluster_name,\n ),\n auth=HTTPBasicAuth(\n self.cloudera_manager_username, self.cloudera_manager_password\n ),\n verify=False,\n )\n else:\n self.logger.error(\"cluster_kerberos_info as cloudera does not exist\")\n r = None\n if r.status_code == 200:\n cluster_kerberos_info = r.json()\n kerberized_status = str(cluster_kerberos_info[\"kerberized\"])\n if kerberized_status == \"True\":\n cluster_kerberos_info = \"Cluster is kerberized\"\n else:\n cluster_kerberos_info = \"Cluster is not kerberized\"\n self.logger.info(\"cluster_kerberos_info successful\")\n return cluster_kerberos_info\n else:\n self.logger.error(\n \"cluster_kerberos_info failed due to invalid API call. HTTP Response: \"\n + str(r.status_code)\n )\n return None\n except Exception as e:\n self.logger.error(\"cluster_kerberos_info failed\", exc_info=True)\n return None",
"def get_monit_service_status(duthost, service):\n result = duthost.shell(\"sudo monit status -B\", module_ignore_errors=True, verbose=False)\n if result[\"rc\"] != 0:\n return {}\n\n services = parse_monit_output(result[\"stdout_lines\"])\n return services[service]",
"def status(gvar):\n\n mandatory = []\n required = []\n optional = ['-cn', '-CSEP', '-CSV', '-g', '-H', '-h', '-NV', '-o', '-ok', '-r', '-s', '-V', '-VC', '-v', '-w', '-x509', '-xA']\n\n if gvar['retrieve_options']:\n return mandatory + required + optional\n\n # Check for missing arguments or help required.\n check_keys(gvar, mandatory, required, optional)\n\n # Retrieve data (possibly after changing the group).\n response = requests(gvar, '/cloud/status/')\n\n # Filter response as requested (or not).\n cloud_status_list = _filter_by_cloud_name_and_or_metadata_name(gvar, response['cloud_status_list'])\n\n # Print report\n show_active_user_groups(gvar, response)\n\n columns = [\n 'group_name/Group,k',\n 'Jobs',\n 'Idle',\n 'Running',\n 'Completed',\n 'Other',\n 'foreign/Foreign',\n 'htcondor_status/HTCondor/Status',\n 'agent_status/Agent/Status',\n 'htcondor_fqdn/HTCondor FQDN',\n 'condor_days_left/Condor/Days Left on Certificates',\n 'worker_days_left/Worker/Days Left on Certificates',\n ]\n\n if response['jobs_by_target_alias_flag']:\n columns.insert(1, 'target_alias/Target Alias,k')\n\n show_table(\n gvar,\n response['job_status_list'],\n columns,\n title=\"Job status\",\n )\n\n show_table(\n gvar,\n cloud_status_list,\n [\n 'group_name/Group,k',\n 'cloud_name/Cloud,k',\n 'enabled/Enabled/Defaults',\n 'default_flavor/Flavor/Defaults',\n 'default_image/Image/Defaults',\n 'keep_alive/Keep Alive/Defaults',\n 'communication_up/Up/Communications',\n 'communication_rt/Request Time/Communications',\n 'VMs_quota/Quota/VMs',\n 'VMs/Total/VMs',\n 'VMs_starting/Starting/VMs',\n 'VMs_unregistered/Unregistered/VMs',\n 'VMs_idle/idle/VMs',\n 'VMs_running/Running/VMs',\n 'VMs_retiring/Retiring/VMs',\n 'VMs_manual/Manual/VMs',\n 'VMs_in_error/Error/VMs',\n 'VMs_other/Other/VMs',\n 'cores_quota/Quota/Cores',\n 'cores_limit/Limit/Cores',\n 'cores_ctl/Setting/Cores',\n 'cores_idle/Idle/Cores',\n 'cores_native/Used/Cores',\n 'ram_quota/Quota/RAM',\n 'ram_limit/Limit/RAM',\n 'ram_ctl/Setting/RAM',\n 'ram_idle/Idle/RAM',\n 'ram_native/Used/RAM',\n 'slot_count/Busy/Condor Slots',\n 'slot_core_count/Busy Cores/Condor Slots',\n 'slot_idle_core_count/Idle Cores/Condor Slots',\n 'Foreign_VMs/VMs/Foreign',\n 'cores_foreign/Cores/Foreign',\n 'ram_foreign/RAM/Foreign',\n ],\n title=\"Cloud status\",\n )\n\n show_table(\n gvar,\n response['flavor_slot_detail_summary'],\n [\n 'group_name/Group,k',\n 'cloud_name/Cloud,k',\n 'flavor/Flavor,k',\n 'slot_type/Type/Slot',\n 'slot_count/Count/Slot',\n 'core_count/Core Count',\n ],\n title=\"Flavor Slot Detail Summary\",\n optional=True,\n )\n\n show_table(\n gvar,\n response['flavor_slot_detail'],\n [\n 'group_name/Group,k',\n 'cloud_name/Cloud,k',\n 'flavor/Flavor,k',\n 'slot_type/Type/Slot',\n 'slot_id/ID/Slot',\n 'slot_count/Count/Slot',\n 'core_count/Core Count',\n ],\n title=\"Flavor Slot Detail\",\n optional=True,\n )\n\n show_table(\n gvar,\n response['flavor_slot_summary'],\n [\n 'group_name/Group,k',\n 'cloud_name/Cloud,k',\n 'flavor/Flavor,k',\n 'busy/Busy/Cores',\n 'idle/Idle/Cores',\n 'idle_percent/Idle Percent/Cores',\n ],\n title=\"Flavor Slot Summary\",\n optional=True,\n )\n\n show_table(\n gvar,\n response['slot_detail_summary'],\n [\n 'group_name/Group,k',\n 'cloud_name/Cloud,k',\n 'slot_type/Type/Slot',\n 'slot_count/Count/Slot',\n 'core_count/Core Count',\n ],\n title=\"Slot Detail Summary\",\n optional=True,\n )\n\n show_table(\n gvar,\n response['slot_detail'],\n [\n 'group_name/Group,k',\n 'cloud_name/Cloud,k',\n 'slot_type/Type/Slot',\n 
'slot_id/ID/Slot',\n 'slot_count/Count/Slot',\n 'core_count/Core Count',\n ],\n title=\"Slot Detail\",\n optional=True,\n )\n\n show_table(\n gvar,\n response['slot_summary'],\n [\n 'group_name/Group,k',\n 'cloud_name/Cloud,k',\n 'busy/Busy/Cores',\n 'idle/Idle/Cores',\n 'idle_percent/Idle Percent/Cores',\n ],\n title=\"Slot Summary\",\n optional=True,\n )",
"def get_servicesinfo(ns):\n tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})\n\n # Firewall\n try:\n fw = ''\n firewalld = get_service(ns, 'firewalld')\n if firewalld and firewalld.Status == 'OK':\n fw = 'on (firewalld)'\n else:\n iptables = get_service(ns, 'iptables')\n if iptables and iptables.Status == 'OK':\n fw = 'on (iptables)'\n if not fw:\n fw = 'off'\n except Exception:\n fw = 'N/A'\n tf.produce_output([('Firewall:', fw)])\n\n # Logging\n try:\n logging = ''\n journald = get_service(ns, 'systemd-journald')\n if journald and journald.Status == 'OK':\n logging = 'on (journald)'\n else:\n rsyslog = get_service(ns, 'rsyslog')\n if rsyslog and rsyslog.Status == 'OK':\n logging = 'on (rsyslog)'\n if not logging:\n logging = 'off'\n except Exception:\n logging = 'N/A'\n tf.produce_output([('Logging:', logging)])\n\n return []",
"def status(self):\n status = 3\n with open(\"/proc/mounts\") as f:\n for line in f.readlines():\n if line.startswith(\"fuse_kafka\"):\n print \"listening on \" + line.split()[1]\n status = 0\n sys.stdout.write(\"service is \")\n if status == 3: sys.stdout.write(\"not \")\n print(\"running\")\n sys.exit(status)",
"def status(self, name=''):\n super(SystemD, self).status(name=name)\n\n svc_list = sh.systemctl('--no-legend', '--no-pager', t='service')\n svcs_info = [self._parse_service_info(svc) for svc in svc_list]\n if name:\n names = (name, name + '.service')\n # return list of one item for specific service\n svcs_info = [s for s in svcs_info if s['name'] in names]\n self.services['services'] = svcs_info\n return self.services",
"def status(self):\n \n return self._make_request(\"server/status\").json()",
"def advapi32_QueryServiceLockStatus(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hSCManager\", \"lpLockStatus\", \"cbBufSize\", \"pcbBytesNeeded\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_service_statuses():\n\n # We'll collect the statuses for the service in a list.\n # Note: increasing the \"minutes\" value will reduce the chances of an\n # getting no status, but also potentially might give a late result\n client = get_monasca_client()\n parms = {\n \"name\": \"http_status\",\n \"start_time\":\n (datetime.utcnow() - timedelta(minutes=1)).isoformat(),\n \"group_by\": \"service\"\n }\n\n measurements = None\n try:\n measurements = client.metrics.list_measurements(**parms)\n if not measurements:\n LOG.error(\"Empty measurements from Monasca\")\n abort(404, \"Unable to retrieve any statuses\")\n except Exception as e:\n LOG.error(\"Unable to access Monasca: %s\" % e)\n abort(503, \"Monasca service unavailable\")\n\n statuses = []\n for m in measurements:\n service = m['dimensions']['service']\n # we get the last measurement value, which is also the latest\n val_idx = m['columns'].index('value')\n if not m['measurements']:\n status = \"unknown\"\n else:\n value = m['measurements'][-1][val_idx]\n if value == 0:\n status = \"up\"\n else:\n status = \"down\"\n statuses.append({\n 'name': service,\n 'status': status\n })\n\n return jsonify(statuses)",
"def advapi32_EnumServicesStatus(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hSCManager\", \"dwServiceType\", \"dwServiceState\", \"lpServices\", \"cbBufSize\", \"pcbBytesNeeded\", \"lpServicesReturned\", \"lpResumeHandle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_services(self):\n\n # try to get services\n try:\n\n # get services\n command = str('kubectl get services')\n subprocess.call(command.split())\n\n # handle exception\n except:\n\n # raise Exception\n raise Exception('I could not get the list of services')",
"def get_chassis_status():\n\n status, ret_values = \\\n grk.run_key_u(\"Run IPMI Standard Command chassis status\")\n result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)\n\n return result",
"def service_names(self):\n return self.services.keys()",
"def getServicesInfo(self):\n res = self.serv.getServicesInfo()\n return res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get list of encryption zones in the cluster.
|
def encryption_zone(self):
try:
enc_zoneList = pd.DataFrame()
xml_data = subprocess.Popen(
"sudo hdfs crypto -listZones",
shell=True,
stdout=subprocess.PIPE,
encoding="utf-8",
)
xml_data.wait(10)
out, err = xml_data.communicate()
if not out.strip():
enc_zoneList = None
else:
intermediate_out = out.splitlines()
intermediate_out.pop(-1)
splitted_search = [x.split("\n") for x in intermediate_out]
enc_zoneList = pd.DataFrame(splitted_search, columns=["data"])
enc_zoneList["data"] = enc_zoneList["data"].str.split(
" ", n=1, expand=True
)
self.logger.info("encryption_zone successful")
return enc_zoneList
except Exception as e:
self.logger.error("encryption_zone failed", exc_info=True)
return None
|
[
"def ex_list_zones(self):\r\n list_zones = []\r\n request = '/zones'\r\n response = self.connection.request(request, method='GET').object\r\n list_zones = [self._to_zone(z) for z in response['items']]\r\n return list_zones",
"def list_zones(self):\r\n return list(self.iterate_zones())",
"def get_zone_names_list(self):\n\n\t\treturn [zone['description'] for zone in self.compute.zones().list(project=self.project).execute()['items']]",
"def availability_zone_list(request):\n az_manager = moganclient(request).availability_zone\n return az_manager.list()",
"def get_availability_zones(region, credential):\n return [az.get(\"ZoneName\") for az in describe_availability_zones(region, credential)]",
"def get_zones():\n zonefld = Globals.app.GetDataFolder(\"ElmZone\")\n zones = zonefld.GetContents()\n #for zone in zones:\n #Globals.app.PrintPlain(zone)\n return zones",
"def _fetch_all_zones(self):\n query = tables.zones.select()\n return self.storage.session.execute(query).fetchall()",
"def _get_list_zone_object(self):\n return self.rad_connection.list_objects(zonemgr.Zone())",
"def list_monitoring_zones(self):\r\n return self._monitoring_zone_manager.list()",
"def _cluster_list():\n\n CLUSTER_TABLE = storage.get_cluster_table()\n clusters = []\n cluster_items = CLUSTER_TABLE.scan()\n\n for cluster in cluster_items['Items']:\n clusters.append(cluster['id'])\n\n print(f'tracked clusters: {clusters}')\n\n return clusters",
"def read_all_zones():\n rospy.wait_for_service('ReadAllProtectionZones')\n\n try:\n function_ReadAllProtectionZones = rospy.ServiceProxy('ReadAllProtectionZones', ReadAllProtectionZones)\n return function_ReadAllProtectionZones().output.protection_zones\n except rospy.ServiceException as e:\n print \"Service call failed : %s\" % e",
"def get_datacenters_list():\n return util.get(abs_link=False)",
"def list_zones():\n ret = salt.utils.mac_utils.execute_return_result(\"systemsetup -listtimezones\")\n zones = salt.utils.mac_utils.parse_return(ret)\n\n return [x.strip() for x in zones.splitlines()]",
"def list_locations(self):\r\n list_locations = []\r\n request = '/zones'\r\n response = self.connection.request(request, method='GET').object\r\n list_locations = [self._to_node_location(l) for l in response['items']]\r\n return list_locations",
"def get_cluster_list(self):\n LOG.info(\"Getting clusters\")\n return self.client.request(constants.GET,\n constants.GET_CLUSTER.format\n (self.server_ip), payload=None,\n querystring=constants.\n SELECT_ID_AND_NAME)",
"def iterate_zones(self):\r\n return self._get_more('zones')",
"def get_clusters(self) -> List[Dict]:\n\n \"\"\"\n GET /v1/clusters HTTP/1.1\n Host: containers.bluemix.net\n Accept: application/json\n Authorization: [PRIVATE DATA HIDDEN]\n Content-Type: application/json\n X-Region: au-syd\n \"\"\"\n # returns 200 OK on success\n\n resp = self.session.get(\n \"{0}/v1/clusters\".format(self.endpoint_url),\n headers={\"X-Region\": self.region, \"Accept\": \"application/json\"},\n )\n\n if resp.status_code != 200:\n raise Exception(\n \"error getting clusters: code=%d body=%r\"\n % (resp.status_code, resp.text)\n )\n\n return resp.json()",
"def cli_cosmosdb_managed_cassandra_cluster_list_backup(client,\r\n resource_group_name,\r\n cluster_name):\r\n return client.list_backups(resource_group_name, cluster_name)",
"def get_instances_in_zone(self, zone):\n\t\t\n\t\ttry:\n\t\t\tinstances = self.compute.instances().list(project=self.project, zone=zone).execute()['items']\n\t\texcept KeyError:\n\t\t\tinstances = []\n\t\treturn instances"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set bill payment id.
|
def set_bill_payment_id(self, bill_payment_id):
self.bill_payment_id = bill_payment_id
|
[
"def get_bill_payment_id(self):\n return self.bill_payment_id",
"def bill_number(self, bill_number):\n self._bill_number = bill_number",
"def setPayment(self, payment):\n self.payment = payment",
"def bitpay_invoice_id(self, bitpay_invoice_id):\n \n self._bitpay_invoice_id = bitpay_invoice_id",
"def paypal_id(self, paypal_id):\n\n self._paypal_id = paypal_id",
"def invoice_id(self, invoice_id):\n \n self._invoice_id = invoice_id",
"def set_is_single_bill_payment(self, is_single_bill_payment):\n self.is_single_bill_payment = is_single_bill_payment",
"def bank_terminal_id(self, bank_terminal_id):\n self._bank_terminal_id = bank_terminal_id",
"def stripe_transaction_id(self, stripe_transaction_id):\n\n self._stripe_transaction_id = stripe_transaction_id",
"def setDebt(self, amt):\n self.debt = amt",
"def set_stripe(self, stripe_id=0):\n self.stripe = stripe_id\n self.reb.set_stripes([self.stripe])",
"def payer_id(self, payer_id):\n\n self._payer_id = payer_id",
"def is_bill(self, is_bill):\n\n self._is_bill = is_bill",
"def month_id(self, month_id: str):\n\n self._month_id = month_id",
"def tax_id(self, value: str):\n self._tax_id = value\n self._dao.tax_id = value",
"def _id(self, _id):\n self.__id = _id",
"def with_payment_id(self, payment_id=0):\n payment_id = numbers.PaymentID(payment_id)\n if not payment_id.is_short():\n raise TypeError(\"Payment ID {0} has more than 64 bits and cannot be integrated\".format(payment_id))\n prefix = 54 if self.is_testnet() else 25 if self.is_stagenet() else 19\n data = bytearray([prefix]) + self._decoded[1:65] + struct.pack('>Q', int(payment_id))\n checksum = bytearray(keccak_256(data).digest()[:4])\n return IntegratedAddress(base58.encode(hexlify(data + checksum)))",
"def update_id(self,id):\n self.id = id",
"def set_payment_comment(self, value):\n (self.driver.find_element(*ProjectFormLoc.FIELD_PAYMENT_COMMENT).\n send_keys(value))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get bill payment id.
|
def get_bill_payment_id(self):
return self.bill_payment_id
|
[
"def payment_id(self):\n return numbers.PaymentID(hexlify(self._decoded[65:-4]).decode())",
"def bill_number(self):\n return self._bill_number",
"def bitpay_invoice_id(self):\n return self._bitpay_invoice_id",
"def set_bill_payment_id(self, bill_payment_id):\n self.bill_payment_id = bill_payment_id",
"def getid(self, bno):\r\n return self.breakpt[bno]['id']",
"def id(self):\n return str(self._pr.number)",
"def get_by_id(self, payment_id):\n\n return self._get_request(Payments.GET_PAYMENT_BY_ID.format(payment_id))",
"def get_current_invoiceID() -> str:\n return DATABASE.get('/Invoices/currentInvoiceID', None)",
"def get_note(payment_id):\n if not has_note(payment_id):\n raise WalletException(\"Payment ID does not contain note\")\n # Get length\n length = int(payment_id[3:5], 16)\n\n # Get note\n hex_note = payment_id[5:length+5]\n note = bytearray.fromhex(hex_note).decode()\n\n # Get check sum and verify\n h = sha256()\n h.update(hex_note.encode())\n hash_digest = h.hexdigest()[:2]\n id_hash = payment_id[length+5:length+7]\n if id_hash != hash_digest:\n raise WalletException(\"Message checksum corrupt\")\n\n return note",
"def id_for_fund(fund):\n cur.execute(\"SELECT id FROM funds WHERE fund='{}'\".format(fund))\n return cur.fetchone()[0]",
"def generate_payment_id(self, note=None):\n if note is None:\n # Don't generate payment id's with note prefix to avoid confusion\n payment_id = binascii.b2a_hex(os.urandom(32)).decode()\n while payment_id[:3] == '1A4':\n payment_id = binascii.b2a_hex(os.urandom(32)).decode()\n return payment_id\n else:\n note = note.encode()\n if len(note) > 20:\n raise WalletException(\"Note is too long. Max of 20 characters.\")\n\n # Notes start with prefix 1A4\n prefix = '1A4'\n\n # Encode note and get length\n hex_note = binascii.hexlify(note)\n note_length = hex(len(hex_note.decode()))[2:]\n if len(note_length) < 2:\n note_length = '0'+note_length\n\n # Add simple checksum\n h = sha256()\n h.update(hex_note)\n hash_digest = h.hexdigest()[:2]\n\n # Convert bytes to string\n hex_note = hex_note.decode()\n\n # Add random chars to end\n total_length = len(prefix) + len(hex_note) + len(note_length) + len(hash_digest)\n remaining_length = 64 - total_length\n random_string = binascii.b2a_hex(os.urandom(remaining_length))[:remaining_length].decode()\n\n # Construct final string\n payment_id = prefix + note_length + hex_note + hash_digest + random_string\n return payment_id",
"def get_is_single_bill_payment(self):\n return self.is_single_bill_payment",
"def debit_note_uid(self):\n return self._debit_note_uid",
"def get_tran_pay_key(data):\n txn_id = data['parent_txn_id'] if 'parent_txn_id' in data else data['txn_id']\n try:\n txn_details = fetch_transaction_details(txn_id)\n except exceptions.PayPalError:\n logger.critical(\"PaymentDetails API call failed\")\n return None\n else:\n return txn_details.pay_key",
"def person_id(self):\n\n if self.s3_logged_in():\n record = self.db(self.db.pr_person.uuid == self.user.person_uuid\n ).select(self.db.pr_person.id,\n limitby=(0,1)\n ).first()\n if record:\n return record.id\n return None",
"def broker_id(self) -> float:\n return pulumi.get(self, \"broker_id\")",
"def get_banking_id(self, cid):\n query=sql.SQL(\"SELECT (id) FROM banking WHERE client_id={cid} LIMIT 1 FOR UPDATE SKIP LOCKED;\").\\\n format(cid=sql.Literal(cid))\n self.db_log.debug(query)\n self.cur.execute(query)\n return self.cur.fetchone()[0]\n #return pd.read_sql(query, self.conn).ix[0]",
"def csv_transaction_id(credit):\n return credit['id'] + 100000000",
"def goods_receipt_id(self):\n return self._goods_receipt_id"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set paid through account id.
|
def set_paid_through_account_id(self, paid_through_account_id):
self.paid_through_account_id = paid_through_account_id
|
[
"def save_account_id(self, account_id):\n self.wepay_account_id = account_id\n self.save()",
"def set_paid(self):\n\n self.paid = True",
"def payBooking(self, selectedBooking):\n selectedBooking.setPaid(True)",
"def paypal_id(self, paypal_id):\n\n self._paypal_id = paypal_id",
"def payer_id(self, payer_id):\n\n self._payer_id = payer_id",
"def put(self, user_id):\n self.conn = pecan.request.db_conn\n self.conn.change_billing_owner(request.context,\n project_id=self.project_id,\n user_id=user_id)",
"def setCommPaid(self, user):\n Commission.objects.filter(\n handyman=user,\n is_paid=False\n ).update(is_paid=True, paidout_date=timezone.now())\n return True",
"def account_id_secretary(self, account_id_secretary: int):\n\n self._account_id_secretary = account_id_secretary",
"def count_paid(self, count_paid):\n\n self._count_paid = count_paid",
"def set_bill_payment_id(self, bill_payment_id):\n self.bill_payment_id = bill_payment_id",
"def put(self, data):\n context = pecan.request.context\n check_policy(context, 'uos_sales_admin')\n\n user_id = self.user_id\n conn = pecan.request.db_conn\n conn.set_accounts_salesperson(context, [user_id], data.sales_id)",
"def bitpay_invoice_id(self, bitpay_invoice_id):\n \n self._bitpay_invoice_id = bitpay_invoice_id",
"def set_public_id(self):\r\n\r\n self.public_id = get_public_id(f\"{self.id}_sociallink\")\r\n self.save()",
"def set_public_id(self):\r\n\r\n self.public_id = get_public_id(f\"{self.id}_user\")\r\n self.save()",
"def invoice_paid(self):\n self._update(\"subscription_status\", \"invoice_paid\")\n self._update(\"is_paying\", True)",
"def partner_id(self, partner_id: UserId):\n\n self._partner_id = partner_id",
"def set_participant_id(self, pid):\n self.participant_id = pid",
"def buy_member_id(self, buy_member_id):\n\n self._buy_member_id = buy_member_id",
"def set_user_id(self,user_id):\n self.user_id = user_id",
"def mark_paid(self):\n\t\tlogger.info('Marking bill %s as paid' % self.name)\n\t\tself.amount_due = 0.0\n\t\tself.due = False\n\t\tself.paid = True\n\t\tself.overdue = False\n\t\tstatus = self.get_status_as_str()\n\t\tlogger.debug(status)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get paid through account id.
|
def get_paid_through_account_id(self):
return self.paid_through_account_id
|
[
"def get_account_id(self):\n return self.wepay_account_id",
"def account_id(self): # DG: renamed\n pass",
"def get_user_account_id(self):\n return self.response_json[\"account\"][\"id\"]",
"def get_paid_through_account_name(self, paid_through_acount_name):\n return self.paid_through_account_name",
"def account_id(self):\n\n return self._account_id.value",
"def get_account_id(event):\n return event['account']",
"def get_account_id(self, budget_id, account_name):\n # get the accounts from YNAb\n accounts = self.get_accounts(budget_id)\n results = [account.id for account in accounts if account.name == account_name]\n if not results:\n return None\n return results[0]",
"def provider_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"provider_account_id\")",
"def account_id_secretary(self) -> int:\n return self._account_id_secretary",
"def get_bill_payment_id(self):\n return self.bill_payment_id",
"def get_account(self):\n return self.fetch_data(\"account\")",
"def accountIdFetch(self, steamid: int):\n accountid = SteamID(steamid).as_32\n return accountid",
"def account(self):\n return self.__account",
"def receivables_account(self) -> Account:\n row = AccountEntry.objects.filter(source_invoice=self).order_by('id').first()\n return row.account if row else None",
"def getAccountNumber(self):\n return self._acctNo",
"def get_account_number(arn):\n return arn.split(\":\")[4]",
"def payment_id(self):\n return numbers.PaymentID(hexlify(self._decoded[65:-4]).decode())",
"def getAccount(self):\n result = self.getAccounts(1)\n if len(result) < 1:\n return None\n else:\n return result[0]",
"def person_id(self):\n\n if self.s3_logged_in():\n record = self.db(self.db.pr_person.uuid == self.user.person_uuid\n ).select(self.db.pr_person.id,\n limitby=(0,1)\n ).first()\n if record:\n return record.id\n return None",
"def get(account_id: str):\n return Account.query.get(account_id)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set paid through account name.
|
def set_paid_through_account_name(self, paid_through_account_name):
self.paid_through_account_name = paid_through_account_name
|
[
"def get_paid_through_account_name(self, paid_through_acount_name):\n return self.paid_through_account_name",
"def autoname(self):\n\t\tif not self.email_account_name:\n\t\t\tself.email_account_name = (\n\t\t\t\tself.email_id.split(\"@\", 1)[0].replace(\"_\", \" \").replace(\".\", \" \").replace(\"-\", \" \").title()\n\t\t\t)\n\n\t\tself.name = self.email_account_name",
"def set_user_name(self, user_name): \n self.user_name = user_name",
"def name_user(self, user, name):\r\n\t\tuser.name = name\r\n\t\tself.usernames[name] = user",
"def SetCustomName(self, givenName): # real signature unknown; restored from __doc__\n pass",
"def set_player_name(name):\n\n player[\"player_name\"] = name",
"def legal_name(self, value: str):\n self._legal_name = value\n self._dao.legal_name = value",
"def set_player_name(self, player):\r\n self.__name = player",
"def change_name(self, name):\n self._player_name = name",
"def changeName(self, uid, acc_num, account_name):\n with open('model/account_model.json', 'r+') as json_file:\n data = json.load(json_file)\n for index, account in enumerate(data):\n if (account['uid'] == uid) and (account['acc_num'] == acc_num):\n data[index]['acc_name'] = str(account_name)\n json_file.seek(0)\n json.dump(data, json_file, indent=4)\n return True",
"def __link_account(self, firstname: str, lastname: str) -> None:\r\n self.user_account = MyBankAccount(firstname, lastname)",
"def set_name(self, new_name):\n self.name = new_name",
"def _setName(self,name,value):\n\n if name in SDS['COP']:\n self.COP.__dict__[name] = value\n else:\n self.__dict__[name] = value",
"def set_profile(self, name):\n self.profile = name\n self.is_init = False",
"def account_name(self) -> str:\n return self['accountName']",
"def setName(self, name):\n self.setAttribute('NAME', name)",
"def setLastName(self, name=\"\"):\n\t\tself.lastName = name",
"def setUserName(*args, **kwargs):\n \n pass",
"def _username(self, new_username):\n self.__username = new_username",
"def set_name_item(self, item_name):\n self.name_item = item_name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get paid through account name.
|
def get_paid_through_account_name(self):
return self.paid_through_account_name
|
[
"def account_name(self) -> str:\n return self['accountName']",
"def get_account_by_name(self, name: str): \r\n return self.accounts[name] if name in self.accounts else None",
"def account_name(session):\r\n iam = session.client('iam')\r\n account_name = \"Null\"\r\n response = iam.list_account_aliases()\r\n logger.info('account_name response:' + str(response))\r\n if 'AccountAliases' in response and response['AccountAliases']:\r\n account_name = response['AccountAliases'][0]\r\n return account_name",
"def name(self):\n req = requests.get(\n f'https://api.vanguard.com/rs/ire/01/pe/fund/{self._fund_id}'\n '/profile.json',\n headers={'Referer': 'https://vanguard.com/'})\n req.raise_for_status() # Raise if error\n return req.json()['fundProfile']['longName']",
"def get_account_from(self):\n return str(self.gui.cmb_account_from.currentText())",
"def get_account(self):\n return self.fetch_data(\"account\")",
"def get_service_account_name():\n return app_identity.get_service_account_name()",
"def part_get_billing_account(self, username):\n return get_member(username).payment_account.billing_accounts[0]",
"def account(self):\n return self.__account",
"def get_name(self, user):\n return user.display_name",
"def account_name(self, flag):\n if flag:\n question = 'Your account name @'\n else:\n question = 'Their account name @'\n while True:\n acct = input(question)\n if (not re.match(r'^[a-z0-9\\-]+$', acct)\n or len(acct) == 0\n or len(acct) > 32):\n msg.message('The account name you entered is '\n + 'blank or contains invalid characters.')\n else:\n if xverify.steem.check_balances(acct):\n break\n else:\n msg.message('Steemit account ' + acct\n + ' could not be found.')\n return acct",
"def getAccount(self):\n result = self.getAccounts(1)\n if len(result) < 1:\n return None\n else:\n return result[0]",
"def getAcctHolderName(self):\n return self._acctHolderName",
"def account_names():\n return wallet['obj'].account_names",
"def get_account_to(self):\n return str(self.gui.cmb_account_to.currentText())",
"def donor_name(self):\n return self.name",
"def prepare_account_name_for_jwt(self, raw_account: str) -> str:\n account = raw_account\n if \".global\" not in account:\n # Handle the general case.\n idx = account.find(\".\")\n if idx > 0:\n account = account[0:idx]\n else:\n # Handle the replication case.\n idx = account.find(\"-\")\n if idx > 0:\n account = account[0:idx] # pragma: no cover\n # Use uppercase for the account identifier.\n return account.upper()",
"def user_display(user):\n return user.get_full_name()",
"def get_name(self) -> str:\n return self._user_display_name or self.load_name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set whether it is a single bill payment.
|
def set_is_single_bill_payment(self, is_single_bill_payment):
self.is_single_bill_payment = is_single_bill_payment
|
[
"def get_is_single_bill_payment(self):\n return self.is_single_bill_payment",
"def is_bill(self, is_bill):\n\n self._is_bill = is_bill",
"def set_paid(self):\n\n self.paid = True",
"def mark_paid(self):\n\t\tlogger.info('Marking bill %s as paid' % self.name)\n\t\tself.amount_due = 0.0\n\t\tself.due = False\n\t\tself.paid = True\n\t\tself.overdue = False\n\t\tstatus = self.get_status_as_str()\n\t\tlogger.debug(status)",
"def setPayment(self, payment):\n self.payment = payment",
"def set_bill_payment_id(self, bill_payment_id):\n self.bill_payment_id = bill_payment_id",
"def setCommPaid(self, user):\n Commission.objects.filter(\n handyman=user,\n is_paid=False\n ).update(is_paid=True, paidout_date=timezone.now())\n return True",
"def payBooking(self, selectedBooking):\n selectedBooking.setPaid(True)",
"def bill_number(self, bill_number):\n self._bill_number = bill_number",
"def premium_single(self, premium_single):\n self._premium_single = premium_single",
"def is_pre_paid(self, is_pre_paid):\n\n self._is_pre_paid = is_pre_paid",
"def set_payoff(self):\r\n self.payoff = 0",
"def save(self, *args, **kwargs):\n super(FioPayment, self).save(*args, **kwargs)\n\n if self.order:\n self.order.update_paid_status()",
"def toggle_recharge_on(self):\n self.will_recharge = True",
"def contact_payment(self, contact_payment):\n\n self._contact_payment = contact_payment",
"def is_buying(self, is_buying):\n\n self._is_buying = is_buying",
"def action_post(self):\n res = super(AccountPayment, self).action_post()\n for rec in self:\n invoice = rec.move_id\n if invoice.book_issue_id and invoice.payment_state == \"paid\":\n invoice.book_issue_id.state = \"paid\"\n return res",
"def premium_single_smoker(self, premium_single_smoker):\n self._premium_single_smoker = premium_single_smoker",
"def set_as_president(self):\n with transaction.atomic():\n self.is_member = False\n self.is_secretary = False\n self.is_treasurer = False\n self.is_president = True\n self.is_inactive = False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get whether it is a single bill payment.
|
def get_is_single_bill_payment(self):
return self.is_single_bill_payment
|
[
"def set_is_single_bill_payment(self, is_single_bill_payment):\n self.is_single_bill_payment = is_single_bill_payment",
"def billable(self):\n\t\treturn self.status in self.BILL_STATUSES",
"def get_bill_payment_id(self):\n return self.bill_payment_id",
"def has_note(payment_id):\n return payment_id[:3] == '1A4'",
"def is_bill(self, is_bill):\n\n self._is_bill = is_bill",
"def return_payment_type_from_db(self):\n with sqlite3.connect('bangazon.db') as dbget:\n c = dbget.cursor()\n command = \"\"\"\n SELECT account_number\n FROM PaymentOption\n WHERE account_number = {}\n \"\"\".format(self.account_number)\n\n try:\n c.execute(command)\n except:\n return False\n\n account_info = c.fetchall()\n\n return True",
"def is_pre_paid(self):\n return self._is_pre_paid",
"def is_payment_and_invoice_complete(self):\n if self.payment.success and self.invoice.status == Invoice.InvoiceStatus.COMPLETE:\n return True\n return False",
"def get_invoice(self, period):\n\t\tinvoice_trasnaction = self.accounttransaction_set.filter(tx_type=\"0\", period_start=period)\n\n\t\tif invoice_trasnaction.count() == 1:\n\t\t\treturn invoice_trasnaction[0]\n\t\telse:\n\t\t\treturn False",
"def test_is_single(self):\n for order in self.orderList:\n bond = Bond(None, None, order=order)\n if order == 1:\n self.assertTrue(bond.is_single())\n else:\n self.assertFalse(bond.is_single())",
"def HasBDR(self):\n return self.__has('BDR')",
"def is_queue_first_car(self):\n first_car = self.parent_object.get_first_car()\n if first_car is None:\n return False\n if first_car.get_stream() == self:\n return True\n else:\n return False",
"def is_currency(self) -> bool:\r\n\t\treturn self.id in (800, 801, 2253, 2254, 2257, 2260, 2261)",
"def is_first_time_buyer(self):\n return self._is_first_time_buyer",
"def get_payment_type(self):\n return self.payment_type",
"def bill_number(self):\n return self._bill_number",
"def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoDetail_isOfType(self, type)",
"def get_is_base_currency(self):\n return self.is_base_currency",
"def is_comment(self):\n if Comment.objects.filter(id=self.id):\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Saving gaphor.UML model elements.
|
def test_save_uml(self):
self.element_factory.create(UML.Package)
self.element_factory.create(UML.Diagram)
self.element_factory.create(UML.Comment)
self.element_factory.create(UML.Class)
out = PseudoFile()
storage.save(XMLWriter(out), factory=self.element_factory)
out.close()
assert "<Package " in out.data
assert "<Diagram " in out.data
assert "<Comment " in out.data
assert "<Class " in out.data
|
[
"def save_model(self):\n f1 = open(self.name + '_' + 'words', 'w')\n f2 = open(self.name + '_' + 'word_lengths', 'w')\n f3 = open(self.name + '_' + 'stems', 'w')\n f4 = open(self.name + '_' + 'sentence_lengths', 'w')\n f5 = open(self.name + '_' + 'word_pair', 'w')\n f1.write(str(self.words))\n f2.write(str(self.word_lengths))\n f3.write(str(self.stems))\n f4.write(str(self.sentence_lengths))\n f5.write(str(self.word_pair))\n f1.close() \n f2.close() \n f3.close() \n f4.close()\n f5.close()",
"def save_eog_model(model):\n _save_model(model, EOG_MODEL_DIR)",
"def save_model(self):\n saver = PolicySaver(self.agent.policy)\n saver.save(self.model_dir)",
"def save(name, g):\n if not os.path.exists(\"graphs//\"):\n os.mkdir(\"graphs//\")\n write_graphml(g, \"graphs//\" + name + \".graphml\")",
"def _save_model(self, out_file):\n pass",
"def savemodel(self,filedir):\n\t\t__btm__.savemodel(self._handle,filedir)",
"def save_model(self):\n np.savetxt(\"weighth.csv\", self.wh, delimiter=\",\")\n np.savetxt(\"weighto.csv\", self.wo, delimiter=\",\")",
"def test_save_item(self):\n diagram = self.element_factory.create(UML.Diagram)\n diagram.create(CommentItem, subject=self.element_factory.create(UML.Comment))\n\n out = PseudoFile()\n storage.save(XMLWriter(out), factory=self.element_factory)\n out.close()\n\n assert \"<Diagram \" in out.data\n assert \"<Comment \" in out.data\n assert \"<canvas>\" in out.data\n assert ' type=\"CommentItem\"' in out.data, out.data",
"def save(self, path):",
"def save_model(self):\n\n\t\tmodel_file = open(self.model_path,'wb')\n\t\tpickle.dump(self.model, model_file)\n\t\tmodel_file.close()",
"def save(self,file):\n assert \".pymodel\" in file\n with open(file,\"w\") as stream:\n pickle.dump(self,stream)",
"def save(self) -> None:\n if self.meta.file_path:\n # We are a family root node or the user has decided to make us one\n # Save family information\n with self.meta.file_path.open('w') as of:\n of.write(self.to_json())\n\n # Now for saving language information\n # Sound changes cannot be serialized! So we can only save lexicon\n # information.\n if self.lexicon:\n self.lexicon.save(self.meta.lexicon_file_path)\n if self.lexicon_delta:\n self.lexicon_delta.save(self.meta.lexicon_delta_file_path)",
"def save(self):\n self.path.write_text(toml.dumps(self.tomldoc))",
"def save_model(model, name):\n output_dir = \"alignment/%s\" % name\n np.save(\"%s/w1\" % output_dir, model.get_w1())\n np.save(\"%s/w2\" % output_dir, model.get_w2())\n np.save(\"%s/b1\" % output_dir, model.get_b1())\n np.save(\"%s/b2\" % output_dir, model.get_b2())",
"def save(self):\n self.vis.save([self.vis.env])",
"def save_model(self, filepath='model.h5'):\n self.model.save(filepath)",
"def save_model(paths, model):\n modelfile = join(paths[\"workdir\"], \"results\", paths[\"identifier\"], \"model\", paths[\"identifier\"]+\".gensim\")\n model.save(modelfile)",
"def save_model(self, path):\r\n torch.save(self.model.state_dict(), path)",
"def save_model(self, model):\n model_path, model_name = self.__get_keras_model_path()\n\n pickle.dump(model, open(model_path + model_name, \"wb\"))\n\n super(DecisionTreeUtil, self).save_pickles_used(Structure.DecisionTree, self.model.get_pickles_name(),\n model_name)\n\n self.logger.write_info(\"A new decision tree model has been created with the name of: \" + model_name + \"\\n\"\n \"In the path: \" + model_path + \"\\n\"\n \"This is the name that will be needed in the other strategies if you want to work with \"\n \"this model.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Save a diagram item too.
|
def test_save_item(self):
diagram = self.element_factory.create(UML.Diagram)
diagram.create(CommentItem, subject=self.element_factory.create(UML.Comment))
out = PseudoFile()
storage.save(XMLWriter(out), factory=self.element_factory)
out.close()
assert "<Diagram " in out.data
assert "<Comment " in out.data
assert "<canvas>" in out.data
assert ' type="CommentItem"' in out.data, out.data
|
[
"def _save_safe(self, cislo_lv, item):\n\n id_lv = self.saved_items.get(cislo_lv)\n if id_lv:\n save_whole_item(item, id_lv, logger=self.logger, cislo_lv=cislo_lv)\n else:\n id_lv = save_whole_item(item, logger=self.logger,\n cislo_lv=cislo_lv)\n self.saved_items[cislo_lv] = id_lv",
"def save(self, path):",
"def save_board(self):\r\n pass",
"def save_matrix(self, matrix):\n print(\"dumping \")\n path = self._create_path(self.dataset)\n print(path)\n print(matrix.sum())\n np.save(path, matrix)\n print(\"dumped to %s\" % path)",
"def save_item(self, item):\n data = {\n 'item_id': item.item_id,\n 'name': item.name,\n }\n # 'aisle': item.aisle,\n # 'category': item.category,\n # 'description': item.description,\n # 'image_url': item.image_url\n # }\n new_row_id = self._save(item.DB_TABLE_NAME, data, item.id)\n if new_row_id:\n item.id = new_row_id\n return new_row_id",
"def save(self, cstmd, estmd_id, animation_id):\n\n # General simulation data.\n sim = {\n 'description': cstmd.description,\n 'num_neurons': cstmd.num_neurons,\n 'num_synapses': cstmd.num_synapses,\n 'num_electrodes': cstmd.num_electrodes,\n 'num_efferents': cstmd.num_neurons * cstmd.num_electrodes,\n 'max_current': cstmd.max_current,\n 'min_current': cstmd.min_current,\n 'min_weight': cstmd.min_weight,\n 'max_weight': cstmd.max_weight,\n 'num_pixels': cstmd.num_pixels,\n 'duration': cstmd.spike_trains.shape[1],\n 'duration_per_frame': cstmd.duration,\n 'potassium': cstmd.potassium,\n 'sodium': cstmd.sodium,\n 'synaptic_distance': cstmd.synaptic_distance,\n 'animation_id': animation_id,\n 'estmd_id': estmd_id,\n 'num_plots': cstmd.num_plots\n }\n\n # Save general data.\n _id = self.collection.insert(sim)\n\n # Spike collection.\n collection = self.db.spikes\n\n # Save spikes.\n for dt in range(cstmd.spike_trains.shape[1]):\n obj = {\n \"sample_id\": _id,\n \"spikes\": cstmd.spike_trains[:, dt].tolist()\n }\n collection.insert(obj)\n\n return _id",
"def save_sequence(self, slot):\n slot = strict_discrete_set(slot, [1, 2, 3])\n self.write(f\"ARB:SAVE {slot}\")",
"def save_func(item, h5file):\n for idx, subitem in enumerate(item):\n if isinstance(subitem, np.ndarray):\n h5file.create_dataset(str(idx), data=subitem)\n elif isinstance(subitem, str):\n h5file.attrs[str(idx)] = subitem",
"def saveShelf():\n pass",
"def saveitem(self, item, oldname=None):\n\t\tself.store().saveitem(item, oldname)",
"def SaveItem(self, index = 0):\n if index >= self.length or index <0:\n warn( \"The list index specified is out of range\")\n return\n item = self.ItemList[index]\n if item.locked ==False:\n item.locked = True\n item.Draw()",
"def save(self):\n self.vis.save([self.vis.env])",
"def dbSave(self, env):\n\t\traise NotImplementedError, 'Flat File Saving Not Implemented'",
"def save_item(self, item: Any) -> None:\n item_rejected = False\n original_item = item\n for processor in self.config.item_processors:\n item = processor(item)\n if item is None:\n item_rejected = True\n break\n if item_rejected:\n logger.debug('item %s was rejected', original_item)\n return\n\n logger.debug('writing item %s to file %s', item, self.config.backup_filename)\n with self._lock:\n write_mp(self.config.backup_filename, item, mode='a', encoder=self.config.msgpack_encoder)",
"def saveMenu():\n pass",
"def _save_symbols_state(self):\r\n #filename = self.actualSymbols[0]\r\n #TODO: persist self.collapsedItems[filename] in QSettings\r\n pass",
"def save(self):\n self.parent.save()",
"def save_value(self, value):\n self.dual_variables[0].save_value(value)",
"def save_rollout(self, episode):\n\n complete_episode = self.compute_total_rewards(episode, self.gamma)\n # print(complete_episode)\n self.rollout_memory.append(complete_episode)\n self.flush_recorder_memory()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test connection loading of an association and two classes. (Should count for all linelike objects alike if this works).
|
def test_connection(self):
c1 = self.create(ClassItem, UML.Class)
c2 = self.create(ClassItem, UML.Class)
c2.matrix.translate(200, 200)
self.diagram.canvas.update_matrix(c2)
assert tuple(self.diagram.canvas.get_matrix_i2c(c2)) == (1, 0, 0, 1, 200, 200)
a = self.create(AssociationItem)
self.connect(a, a.head, c1)
self.connect(a, a.tail, c2)
self.diagram.canvas.update_now()
assert a.head.pos.y == 0, a.head.pos
assert a.tail.pos.x == 10, a.tail.pos
assert a.tail.pos.y == 200, a.tail.pos
assert a.subject
fd = StringIO()
storage.save(XMLWriter(fd), factory=self.element_factory)
data = fd.getvalue()
fd.close()
old_a_subject_id = a.subject.id
self.element_factory.flush()
assert not list(self.element_factory.select())
fd = StringIO(data)
storage.load(fd, factory=self.element_factory)
fd.close()
diagrams = list(self.kindof(UML.Diagram))
assert 1 == len(diagrams)
d = diagrams[0]
a = d.canvas.select(lambda e: isinstance(e, AssociationItem))[0]
assert a.subject is not None
assert old_a_subject_id == a.subject.id
cinfo_head = a.canvas.get_connection(a.head)
assert cinfo_head.connected is not None
cinfo_tail = a.canvas.get_connection(a.tail)
assert cinfo_tail.connected is not None
assert cinfo_head.connected is not cinfo_tail.connected
|
[
"def test_set_associations_2_adjacent(self):\n test_object = self.test.adjacent_association6\n actual = test_object._set_associations()\n expected_count = 4\n self.assertEqual(expected_count, len(actual))",
"def test_set_associations_1_adjacent(self):\n test_object = self.test.adjacent_association4\n actual = test_object._set_associations()\n expected_count = 3\n self.assertEqual(expected_count, len(actual))",
"def _link_associations_with_classes(self):\n log(f\"Try linking classes with associations\")\n class_entities = self.get_generic_entities(types=[ClassDiagramTypes.CLASS_ENTITY])\n assoc_entities = self.get_generic_entities(types=[ClassDiagramTypes.ASSOCIATION_ENTITY])\n advanced_entities = self.get_generic_entities(types=[ClassDiagramTypes.ASSOCIATION_ENTITY_ADVANCED])\n\n # Link class entities with remaining associations\n for c in class_entities:\n class_bounding_box = c.bounding_box(adjustment=constants.BOUNDING_BOX_ADJUSTMENT)\n\n # ... with advanced associations\n for a in advanced_entities:\n for advanced_shape in a.shapes:\n if type(advanced_shape) is Shape:\n advanced_bounding_box = advanced_shape.bounding_box()\n\n if util.do_bounding_boxes_intersect(advanced_bounding_box, class_bounding_box) or util.do_bounding_boxes_intersect(class_bounding_box, advanced_bounding_box):\n a.set(ClassDiagramConverter.STR_ASSOC_FROM, c)\n\n elif type(advanced_shape) is Line:\n line_start = advanced_shape.start_xy()\n line_end = advanced_shape.end_xy()\n\n if util.is_point_in_area(line_start, class_bounding_box) or util.is_point_in_area(line_end, class_bounding_box):\n a.set(ClassDiagramConverter.STR_ASSOC_TO, c)\n\n # ... with simple associations\n for a in assoc_entities:\n line = a.shapes[0] # GenericEntity of type ASSOCIATION_ENTITY always has just one shape, which is a Line\n line_start = line.start_xy()\n line_end = line.end_xy()\n\n if util.is_point_in_area(line_start, class_bounding_box):\n a.set(ClassDiagramConverter.STR_ASSOC_FROM, c)\n log(\"FROM association found\")\n\n elif util.is_point_in_area(line_end, class_bounding_box):\n a.set(ClassDiagramConverter.STR_ASSOC_TO, c)\n log(\"TO association found\")",
"def test_objects_prefetch_clause(self):\n ticket = (\n Ticket.objects()\n .prefetch(\n Ticket.all_related(),\n Ticket.concert.all_related(),\n Ticket.concert.band_1.all_related(),\n Ticket.concert.band_2.all_related(),\n )\n .first()\n .run_sync()\n )\n\n self.assertIsInstance(ticket.concert, Concert)\n self.assertIsInstance(ticket.concert.band_1, Band)\n self.assertIsInstance(ticket.concert.band_2, Band)\n self.assertIsInstance(ticket.concert.venue, Venue)\n self.assertIsInstance(ticket.concert.band_1.manager, Manager)\n self.assertIsInstance(ticket.concert.band_2.manager, Manager)",
"def test_save_set_associations(self):\n new_datum1 = mommy.make(\"datum.DatumObject\")\n new_datum2 = mommy.make(\"datum.DatumObject\")\n test_object = mommy.make(\"association.AssociationAdjacent\",\n parent_datum=new_datum1,\n child_datum=new_datum2\n )\n test_object.save()\n actual = test_object.all_associations\n expected_count = 3\n self.assertEqual(expected_count, len(actual))",
"def test_used_as_association_reifier (self):\n self._test_reifiable(self.create_association())",
"def test_analysis_link_classes(): \n AnalyzeROI.create()\n AnalyzeSED.create()",
"def test_classification_model(self):\n self.assertEqual(Classification.query.count(), 1)",
"def test_parser(self):\n parser = get_association_parser(GENCC)\n expected = [\n {\"subject\": \"HGNC:10896\", \"subject_label\": \"SKI\", \"object\": \"MONDO:0008426\"},\n {\n \"subject\": \"HGNC:16636\",\n \"subject_label\": \"KIF1B\",\n \"object\": \"MONDO:0008233\",\n \"object_label\": \"pheochromocytoma\",\n },\n ]\n with open(G2D_INPUT) as file:\n assocs = list(parser.parse(file))\n found = {}\n for association in assocs:\n logging.info(association)\n for e in expected:\n if all([getattr(association, k) == e[k] for k, v in e.items()]):\n found[frozenset(e.items())] = True\n for e in expected:\n self.assertIn(frozenset(e.items()), found, f\"Expected {e} not found in {found}\")",
"def test_relation(self):\n normal = Normal.objects.language('en').get(pk=self.normal_id[1])\n related = Related.objects.create(normal=normal)\n self.assertEqual(related.normal.pk, normal.pk)\n self.assertEqual(related.normal.shared_field, normal.shared_field)\n self.assertEqual(related.normal.translated_field, normal.translated_field)\n self.assertTrue(related in normal.rel1.all())",
"def test_has_ref(self):\r\n factory = TestFactory()\r\n rels = factory.get_relations_for(self.configuration, 'a.a')\r\n ret= rels.execute()\r\n self.assertTrue(ret)",
"def test_is_connected_2(set_of_sets):\n assert not is_connected(set_of_sets)",
"def test_collect_topology_comp_relations(self):\n topo_items = {\"datastores\": [{'mor_type': 'datastore','topo_tags': {'accessible': True, 'topo_type':\n 'vsphere-Datastore', 'capacity': 999922073600L, 'name': 'WDC1TB', 'url':\n '/vmfs/volumes/54183927-04f91918-a72a-6805ca147c55', 'type': 'VMFS', 'vms': ['UBUNTU_SECURE', 'W-NodeBox',\n 'NAT', 'Z_CONTROL_MONITORING (.151)', 'LEXX (.40)', 'parrot']}}], \"vms\": [], 'clustercomputeresource': [],\n 'computeresource': [], 'hosts': [], 'datacenters': []}\n\n config = {}\n self.load_check(config)\n instance = {'name': 'vsphere_mock', 'host': 'test-esxi'}\n self.check.get_topologyitems_sync = MagicMock(return_value=topo_items)\n self.check.collect_topology(instance)\n topo_instances = self.check.get_topology_instances()\n\n # Check if the returned topology contains 1 component\n self.assertEqual(len(topo_instances), 1)\n self.assertEqual(len(topo_instances[0]['components']), 1)\n self.assertEqual(topo_instances[0]['components'][0]['externalId'],\n 'urn:vsphere:/test-esxi/vsphere-Datastore/WDC1TB')\n\n # Check if the returned topology contains 6 relations for 6 VMs\n self.assertEqual(len(topo_instances[0]['relations']), 6)\n self.assertEqual(topo_instances[0]['relations'][0]['type']['name'], 'vsphere-vm-uses-datastore')",
"def test_load_chain(self):\n pass",
"def test_element_add_relationship_twice_is_ok():\n element1 = ConcreteElement(name=\"elt1\")\n element2 = ConcreteElement(name=\"elt1\")\n model = MockModel()\n element1.set_model(model)\n relationship = element1.add_relationship(destination=element2)\n element1.add_relationship(relationship)\n assert element1.relationships == {relationship}",
"def test_is_connected_1(set_of_sets):\n assert is_connected(set_of_sets)",
"def test_lazyload_scalar_relationship(self):\n\n def main(ssn: Session, query_logger: QueryLogger, reset: callable):\n # ### Test: load a relationship\n reset()\n # one, two, three, four = load_numbers()\n apple, orange, grape, plum, cherry, strawberry, tomato = load_fruits(\n default_columns(Number) # !!!\n .nplus1loader('*')\n )\n ssn.add(Number()) # stumbling block\n\n with query_logger, ssn.no_autoflush:\n # Make sure it's unloaded\n self.assertUnloaded('number', apple)\n self.assertUnloaded('number', orange)\n self.assertUnloaded('number', grape)\n self.assertUnloaded('number', plum)\n self.assertUnloaded('number', cherry)\n self.assertUnloaded('number', strawberry)\n self.assertUnloaded('number', tomato)\n\n # Trigger a lazyload of `fruits`\n apple.number\n self.assertMadeQueries(1) # one query to load them all\n self.assertLoaded('number', apple)\n self.assertLoaded('number', orange)\n self.assertLoaded('number', grape)\n self.assertLoaded('number', plum)\n self.assertLoaded('number', cherry)\n self.assertLoaded('number', strawberry)\n self.assertLoaded('number', tomato)\n\n # Freely access the `fruits` attribute on other objects: no additional queries\n self.assertEqual(apple.number.en, 'one')\n self.assertEqual(orange.number.en, 'one')\n self.assertEqual(grape.number.en, 'two')\n self.assertEqual(plum.number.en, 'two')\n self.assertEqual(cherry.number.en, 'three')\n self.assertEqual(strawberry.number.en, 'three')\n self.assertEqual(tomato.number, None)\n self.assertMadeQueries(0) # no additional queries\n\n def load_fruits(*options) -> List[Fruit]:\n return ssn.query(Fruit).options(*options).order_by(Fruit.id.asc()).all()\n\n ssn = self.Session()\n self._run_main(main, ssn)",
"def test_proxy_columns(self):\n # We call it multiple times to make sure it doesn't change with time.\n for _ in range(2):\n self.assertEqual(\n len(Concert.band_1.manager._foreign_key_meta.proxy_columns), 2\n )\n self.assertEqual(\n len(Concert.band_1._foreign_key_meta.proxy_columns), 4\n )",
"def test_one_organization_many_identity_providers(self):\n IdpOrganizationAssociation.objects.create(\n organization=self.organization,\n idp_identifier=\"https://some-other-idp.com/entity/id/\",\n )\n\n IdpOrganizationAssociation.objects.create(\n organization=self.organization,\n idp_identifier=\"https://my-idp.com/entity/id/\",\n )\n\n self.assertEqual(\n IdpOrganizationAssociation.objects.filter(\n organization=self.organization\n ).count(),\n 3,\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Listen for requests on the named channel and dispatch them to the given handler. This method can be called multiple times.
|
def listen(self, name, handler):
|
[
"def handle_channel(self, input_channel=None):\n # type: (InputChannel) -> None\n input_channel.start_sync_listening(self.handle_message, self.tracker_store)",
"def handle(self) -> None:\n while True:\n raw_command = self.request.recv(1024)\n if not raw_command:\n break\n result = dispatch(self.state, raw_command)\n self.request.send(result)",
"async def handle(self):\n # For each channel, launch its own listening coroutine\n listeners = []\n for key in self.beat_config.keys():\n listeners.append(asyncio.ensure_future(\n self.listener(key)\n ))\n\n # For each beat configuration, launch it's own sending pattern\n emitters = []\n for key, value in self.beat_config.items():\n if isinstance(value, (list, tuple)):\n for v in value:\n emitters.append(asyncio.ensure_future(\n self.emitters(key, v)\n ))\n else:\n emitters.append(asyncio.ensure_future(\n self.emitters(key, value)\n ))\n\n # Wait for them all to exit\n await asyncio.wait(emitters)\n await asyncio.wait(listeners)",
"def serve(name, handler, host, port=0, backlog=socket.SOMAXCONN, timeout=None):\n\n assert backlog > 0\n\n try:\n listener = create_server(host, port, backlog, timeout)\n except Exception:\n log.reraise_exception(\n \"Error listening for incoming {0} connections on {1}:{2}:\", name, host, port\n )\n host, port = listener.getsockname()\n log.info(\"Listening for incoming {0} connections on {1}:{2}...\", name, host, port)\n\n def accept_worker():\n while True:\n try:\n sock, (other_host, other_port) = listener.accept()\n except (OSError, socket.error):\n # Listener socket has been closed.\n break\n\n log.info(\n \"Accepted incoming {0} connection from {1}:{2}.\",\n name,\n other_host,\n other_port,\n )\n handler(sock)\n\n thread = threading.Thread(target=accept_worker)\n thread.daemon = True\n hide_thread_from_debugger(thread)\n thread.start()\n\n return listener",
"def get_channel_handler_by_name(self, channel_name):\r\n with self.channel_resource_lock:\r\n if self.channel_resources.get(channel_name):\r\n return self.channel_resources[channel_name].handler\r\n return None",
"def dispatch_request(self,req):\r\n try:\r\n while True:\r\n handler = self.active_handlers.popleft()\r\n try:\r\n return self.send_request_to_handler(req,handler)\r\n finally:\r\n self.active_handlers.append(handler)\r\n except IndexError:\r\n return False",
"def listen(self):\n actions = {\n # gameplay events\n gameengine.Message.NEW_GAME: self.new_game,\n gameengine.Message.NEW_ROUND: self.new_round,\n gameengine.Message.ROUND_OVER: self.round_over,\n gameengine.Message.GAME_OVER: self.game_over,\n gameengine.Message.DONE: self.done,\n\n # player actions\n gameengine.Message.OFFER: self.handle_offer,\n gameengine.Message.BINDING_OFFER: self.handle_binding_offer,\n gameengine.Message.TRADE: self.handle_trade,\n gameengine.Message.WITHDRAW: self.handle_withdraw,\n }\n\n while not self.done_event.is_set():\n message = self.conn.recv()\n if message.text in actions:\n actions[message.text](message)",
"async def handle_request(\n self, request: JsonRpcRequest, result_channel: trio.MemorySendChannel,\n ) -> None:\n try:\n handler = self.get_handler(request.method)\n params = request.params\n if isinstance(params, list):\n result = await handler(*params)\n elif isinstance(params, dict):\n result = await handler(**params)\n else:\n result = await handler()\n except JsonRpcException as jre:\n result = jre\n except Exception as exc:\n logger.exception(\n 'An unhandled exception occurred in handler \"%s\"', handler.__name__,\n )\n result = JsonRpcInternalError(\"An unhandled exception occurred.\")\n await result_channel.send((request, result))",
"def add_channel_post_handler(self, handler_dict):\n self.channel_post_handlers.append(handler_dict)",
"def message_listener(self):\n queue = self.chatty_server.message_queues[self.username]\n while self.socket:\n if not queue.empty():\n message, message_type = queue.get()\n self.write(message, message_type=message_type)\n gevent.sleep(self.tick)",
"async def channel(self, name='default'):\n future = asyncio.Future(loop=self.loop)\n\n if not self._connecting.done(): # pragma: no cover\n self.log.debug('Await connecting...')\n await self._connecting\n\n if name in self._channels_opening:\n if not self._channels_opening[name].done():\n self.log.debug('Channel already opening, wait it...')\n return await self._channels_opening[name]\n\n if name in self._channels and self._channels[name].is_open:\n future.set_result(self._channels[name])\n return await future\n\n self._channels_opening[name] = self._create_future()\n\n def on_channel(channel: pika.channel.Channel):\n \"\"\"On channel closed handler.\n \"\"\"\n channel.add_on_close_callback(self.on_channel_closed)\n channel.basic_qos(prefetch_count=100)\n self._channels[name] = channel\n try:\n self._channels_opening[name].set_result(channel)\n except asyncio.InvalidStateError: # pragma: no cover\n pass\n future.set_result(channel)\n\n self.connection.channel(on_open_callback=on_channel)\n return await asyncio.wait_for(future, timeout=DECLARE_CHANNEL_TIMEOUT)",
"def serve_forever(self):\r\n while 1:\r\n self.handle_request()",
"def invoke(self, *args, **kwargs):\n for handler in self.handlers:\n if handler is not None:\n handler(*args, **kwargs)",
"def listen(self, request_dict):\n while True:\n gevent.sleep(1) # switch to request co-routine\n # use select to get readable event\n readable, writable, exceptional = select.select(self.inputs, self.outputs, self.inputs, 0)\n\n if not (readable or writable or exceptional):\n print 'No input event'\n continue\n\n # polling readable event\n for s in readable:\n buf = s.recv(9999)\n data = buf\n while len(buf): # read until there's no data\n buf = s.recv(9999)\n data += buf\n if data:\n # print 'recv data:', data, 'from', s.getpeername()\n self.message_queue[s].put(data) # put data into message_queue\n self.inputs.remove(s) # remove socket because only wait for one response\n s.close()\n else:\n # no data received\n print 'close the connection', s.getpeername()\n if s in self.outputs:\n self.outputs.remove(s)\n self.inputs.remove(s)\n s.close()\n del self.message_queue[s]\n\n # exceptional event\n for s in exceptional:\n print \"exceptional connection:\", s.getpeername()\n self.inputs.remove(s)\n if s in self.outputs:\n self.outputs.remove(s)\n s.close()\n del self.message_queue[s]\n\n # check if all requests have been answered\n if DcaProtocol.check_termination(self.message_queue, request_dict):\n print 'All requests have been answered'\n return request_dict\n return",
"def handle(self, handler: Handler):\n pass",
"def handle(self):\n request_data = parse_request_json(self.request)\n response = None\n if request_data[SC.MSG_TITLE] == SC.MESSAGE_GET_ROLE:\n response = self.handle_get_role(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_BROADCAST_ROLES:\n response = self.handle_get_network_information(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_PRODUCE_VOTES:\n response = self.handle_produce_votes(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_DISTRIBUTE_VOTES:\n response = self.handle_distribute_votes(request_data)\n else:\n response = self.handle_unexpected_request()\n send_response_json(self.request, response, request_data[SC.MSG_ORIGIN])",
"def invoke(self, *args, **kwargs):\n for handler in self.handlers:\n if handler is not None:\n return handler(*args, **kwargs)",
"def dispatch(self):\r\n for action in Server.instance.actions:\r\n if action.enabled:\r\n try:\r\n action.on_event(self)\r\n except Exception as error:\r\n Log.Error(f\"[{Server.instance.name}] failed to run action {action.name}\")\r\n traceback.print_exc()",
"def call_handler(self):\n try:\n self.count += 1\n self.time = time.time()\n self.handler(self)\n except Exception:\n g.es_exception()\n self.stop()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve a clone of an existing basket
|
def clone(self):
position_set = deepcopy(self.position_set)
return Basket(position_set=position_set, clone_parent_id=self.id, parent_basket=self.ticker)
|
[
"def test_copyBasket(self):\n basket1 = self.createBasket()\n basket1.addItem(\"beans\")\n basket1.addItem(\"spaghetti hoops\")\n\n basket2 = self.createBasket()\n basket2.copyFrom(basket1)\n\n self.assertEqual(basket1.total(), basket2.total())\n self.assertEqual(basket2.total(), beans.price() + spaghettiHoops.price())\n self.assertEqual(basket1.savings(), basket2.savings())",
"def clone(self, *args):\n return _coin.SoNodekitCatalog_clone(self, *args)",
"def clone(self, actor):\n\n try:\n c = get_cursor()\n\n access_id = new_access_id(16)\n c.execute(\"\"\"\n insert into cart\n (\n access_id,\n cart_status_id,\n shipping_id,\n cc_encrypt\n )\n select %s, %s, shipping_id, cc_encrypt from cart\n where cart_id = %s\"\"\",\n (access_id, STATUS_NEW, self.cart['cart_id']))\n new_cart_id = c.lastrowid\n\n for line_item in self.cart['line_items']:\n c.execute(\"\"\"\n insert into line_item (\n cart_id,\n product_id,\n price,\n quantity,\n seq\n )\n values ( %s, %s, %s, %s, %s )\"\"\",\n (new_cart_id, line_item['product_id'], line_item['price'],\n line_item['quantity'], line_item['seq']))\n new_line_item_id = c.lastrowid\n new_build_id = Build.clone(line_item['build_access_id'], new_line_item_id)\n\n c.execute(\"\"\"\n insert into address\n (\n cart_id,\n bill_first_name,\n bill_last_name,\n bill_company_name,\n bill_address1,\n bill_address2,\n bill_city,\n bill_state_id,\n bill_province,\n bill_postal_code,\n bill_country_id,\n bill_phone,\n ship_first_name,\n ship_last_name,\n ship_company_name,\n ship_address1,\n ship_address2,\n ship_city,\n ship_state_id,\n ship_province,\n ship_postal_code,\n ship_country_id,\n ship_phone,\n email\n )\n select %s,\n bill_first_name,\n bill_last_name,\n bill_company_name,\n bill_address1,\n bill_address2,\n bill_city,\n bill_state_id,\n bill_province,\n bill_postal_code,\n bill_country_id,\n bill_phone,\n ship_first_name,\n ship_last_name,\n ship_company_name,\n ship_address1,\n ship_address2,\n ship_city,\n ship_state_id,\n ship_province,\n ship_postal_code,\n ship_country_id,\n ship_phone,\n email\n from address\n where address.cart_id = %s\"\"\",\n (new_cart_id, self.cart['cart_id']))\n\n import db.Cart as Cart\n new_cart = Cart.ShoppingCart(cart_id=new_cart_id)\n new_cart.recompute()\n new_cart.log(\"Cloned from cart {}.\".format(self.cart['cart_id']), actor)\n self.log(\"Cloned into cart {}.\".format(new_cart_id), actor)\n return new_cart_id\n\n except Exception as e:\n print \"Internal error: \" + e.args[0]\n import traceback\n traceback.print_exc()\n raise DbError(\"Internal error: \" + e.args[0])",
"def clone(self):\n return self.__clone(True)",
"def _clone(context, obj, clone_id):\n return context.manage_clone(obj, clone_id)",
"def clone(self, data):",
"def create(self, codes):\n basket = None\n connection_factory = factory.connection_factory(self.connection_factory_type)\n try:\n with connection_factory.get_connection() as client:\n document = {\"items\": codes,\n \"promos\": [],\n \"state\": BasketState.OPEN.name,\n \"created\": datetime.datetime.utcnow()}\n basket = client.farmers.basket.save(document)\n if basket:\n return basket\n self.logger.error(\"Error in creating basket!\")\n except errors.DuplicateKeyError as duplicate_key_error:\n self.logger.error(duplicate_key_error)\n return None",
"def clone(self, name='', datastore_id=-1):\n self.client.call(self.METHODS['clone'], self.id, name, datastore_id)",
"def new_basket():\n return render_template('new_basket.html')",
"def read(self, _id):\n basket = None\n connection_factory = factory.connection_factory(self.connection_factory_type)\n try:\n with connection_factory.get_connection() as client:\n _filter = {\"_id\": ObjectId(_id)}\n basket = client.farmers.basket.find_one(_filter)\n if basket:\n return basket\n self.logger.error(\"Could not find basket with id %s\", _id)\n except Exception as exception:\n self.logger.error(exception)\n return None",
"def clone(self) -> \"ScXMLDataObj *\":\n return _coin.ScXMLSbDataObj_clone(self)",
"def clone(self, type: 'SoType') -> \"SoNodekitCatalog *\":\n return _coin.SoNodekitCatalog_clone(self, type)",
"def show_basket(basket_id):\n basket = fruit_baskets.find_one({'_id': ObjectId(basket_id)})\n return render_template('show_basket.html', basket=basket)",
"def copy(self):\n new_piece = Bishop(self.pos, self.team)\n new_piece.moved = self.moved\n return new_piece",
"def clone(self):\n return Savepoint('',\n impl=invoke(lib.serialboxSavepointCreateFromSavepoint, self.__savepoint))",
"def clone_transaction(transaction, new_portfolio = None):\n \n out = Transaction()\n out.portfolio = (new_portfolio if new_portfolio != None else transaction.portfolio)\n out.type = transaction.type\n out.as_of_date = transaction.as_of_date\n out.symbol = transaction.symbol\n out.quantity = transaction.quantity\n out.price = transaction.price\n out.total = transaction.total\n out.linked_symbol = transaction.linked_symbol\n \n return out",
"def clone_CloneQuestionnaire(request, id):\n return CloneQuestionnaire_update(request, id)",
"def copy(self, in_place: bool = False) -> list:\n new_ingredients = self.copy_ingredients()\n if in_place:\n self.ingredient_list = new_ingredients\n else:\n new_list = GroceryList(self.copy_ingredients())\n return new_list",
"def clone(self) -> \"ScXMLElt *\":\n return _coin.ScXMLElt_clone(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Upload basket composition history
|
def upload_position_history(self, position_sets: List[PositionSet]) -> Dict:
if self.default_backcast:
raise MqValueError('Unable to upload position history: option must be set during basket creation')
historical_position_sets = []
for position_set in position_sets:
self.__validate_position_set(position_set)
positions = [IndicesPositionInput(p.asset_id, p.weight) for p in position_set.positions]
historical_position_sets.append(IndicesPositionSet(tuple(positions), position_set.date))
response = GsIndexApi.backcast(self.id, CustomBasketsBackcastInputs(tuple(historical_position_sets)))
return response.as_dict()
|
[
"def process_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def save_history(cube, field, filename): \n\n try:\n history.append(cube.attributes['history'])\n except KeyError:\n pass",
"def uploadtoDB(date, item, place, cost):",
"def create_basket():\n basket = {\n 'name': request.form.get('name'),\n 'recipient': request.form.get('recipient'),\n 'fruits': [],\n 'fruits_count': 0\n }\n # Add each fruit in basket - quantity starts at 0\n for fruit in all_fruits:\n updated_fruit = {\n 'name': fruit['name'],\n 'cost': fruit['cost'],\n 'img_url': fruit['img_url'],\n 'quantity': 0,\n }\n basket['fruits'].append(updated_fruit)\n\n basket_id = fruit_baskets.insert_one(basket).inserted_id\n return redirect(url_for('show_basket', basket_id=basket_id))",
"def upload_company_cbhistory(request):\n data = {'title': 'Import Company CB History Data'}\n addGlobalData(request, data)\n\n if not data['company'] or not data['has_access_to_company']:\n return HttpResponseRedirect(reverse('companies'))\n\n try:\n # Read the file and its sheets and returns an OrderedDict of DataFrames\n df = pd.read_excel(request.FILES['file'], sheet_name=['CBHISTORY'])\n\n # ITEM DataFrame\n for _, row in df['CBHISTORY'].iterrows():\n cbid = row.get('CBID', '')\n cus_accno = row.get('ACCNO', '')\n distr_dea = row.get('DEA', '')\n doc_type = row.get('DOCTYPE', '844')\n cb_date = str(row.get('DATE', '')).split(' ')[0]\n cb_number = row.get('NUMBER', '')\n cb_type = '00' if pd.isna(row['TYPE']) or int(row['TYPE']) == 0 else '15'\n\n cb_resub_no = '' if pd.isna(row['RESUBNO']) else row['RESUBNO']\n cb_resub_desc = '' if pd.isna(row['RESUBDESC']) else row['RESUBDESC']\n original_cb_id = '' if pd.isna(row['ORIGCBID']) else row['ORIGCBID']\n\n claim_subtotal = row.get('SUBTOTAL', None)\n claim_calculate = row.get('CALCULATE', None)\n claim_issue = row.get('ISSUE', None)\n claim_adjustment = row.get('ADJUSTMENT', None)\n lines_count = row.get('LINECOUNT', 0)\n\n cm_number = row.get('CMNO', None)\n cm_date = str(row.get('CMDATE', '')).split(' ')[0]\n cm_amount = row.get('CMAMOUNT', None)\n\n if cbid:\n # get Direct Customer obj (from acc number)\n direct_customer = DirectCustomer.objects.filter(account_number=cus_accno)\n customer_id = direct_customer[0].get_id_str() if direct_customer else None\n\n # get Distribution Center obj (from dea number)\n distr_dea = DistributionCenter.objects.filter(dea_number=distr_dea)\n distr_dea_id = distr_dea[0].get_id_str() if distr_dea else None\n\n # CB History obj\n cbhistory, _ = ChargeBackHistory.objects.get_or_create(cbid=cbid)\n cbhistory.chargeback_id = None\n cbhistory.customer_id = customer_id\n cbhistory.distribution_center_id = distr_dea_id\n cbhistory.import844_id = None\n cbhistory.document_type = doc_type\n cbhistory.date = convert_string_to_date_imports(cb_date) if cb_date else None\n cbhistory.type = cb_type\n cbhistory.number = cb_number\n\n cbhistory.resubmit_number = cb_resub_no\n cbhistory.resubmit_description = cb_resub_desc\n cbhistory.original_chargeback_id = original_cb_id\n\n cbhistory.claim_subtotal = Decimal(claim_subtotal) if claim_subtotal else None\n cbhistory.claim_calculate = Decimal(claim_calculate) if claim_calculate else None\n cbhistory.claim_issue = Decimal(claim_issue) if claim_issue else None\n cbhistory.claim_adjustment = Decimal(claim_adjustment) if claim_adjustment else None\n cbhistory.total_line_count = int(lines_count) if lines_count else 0\n\n cbhistory.is_received_edi = False\n cbhistory.accounting_credit_memo_number = cm_number\n cbhistory.accounting_credit_memo_date = convert_string_to_date_imports(cm_date) if cm_date else None\n cbhistory.accounting_credit_memo_amount = Decimal(cm_amount) if cm_amount else None\n\n cbhistory.stage = STAGE_TYPE_ARCHIVED\n cbhistory.substage = SUBSTAGE_TYPE_NO_ERRORS\n\n cbhistory.save()\n\n return ok_json()\n\n except Exception as ex:\n print(ex.__str__())\n return bad_json(message=ex.__str__())",
"def submit_basket():\n basket_id = request.form.get('basket_id')\n basket = fruit_baskets.find_one({'_id': ObjectId(basket_id)})\n fruits_count = 0\n\n # Update the quantity of each fruit in basket, according to user-entered\n # form fields\n for fruit in basket['fruits']:\n quantity = int(request.form.get(fruit['name']))\n fruit['quantity'] = quantity\n fruits_count += quantity\n\n # Update the count to be the sum of all fruits\n basket['fruits_count'] = fruits_count\n\n # Update the database entry\n fruit_baskets.update_one(\n {'_id': ObjectId(basket_id)},\n {'$set': basket}\n )\n return redirect(url_for('show_basket', basket_id=basket_id))",
"def finalize_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def archive(self):\n\n if not self.save:\n return\n\n persist_log = IXLanIXFMemberImportLog.objects.create(ixlan=self.ixlan)\n for action in [\"delete\", \"modify\", \"add\"]:\n for info in self.actions_taken[action]:\n netixlan = info[\"netixlan\"]\n version_before = info[\"version\"]\n\n versions = reversion.models.Version.objects.get_for_object(netixlan)\n\n if version_before:\n versions = versions.filter(id__gt=version_before.id)\n version_after = versions.last()\n else:\n version_after = versions.first()\n\n if not version_after:\n continue\n\n # push for data change notification (#403)\n DataChangeNotificationQueue.push(\n \"ixf\", action, netixlan, version_before, version_after, **info\n )\n\n persist_log.entries.create(\n netixlan=netixlan,\n version_before=version_before,\n action=action,\n reason=info.get(\"reason\"),\n version_after=version_after,\n )",
"def save(self, item_kind, files_too, base_func):\n for item in getattr(self, item_kind)():\n basename = HipDump.slugify(base_func(item))\n pathname = \"{}/{}/{}\".format(self.path, item_kind, basename)\n filename = \"{}/{}.json\".format(pathname, basename)\n # Combine any saved chats with newly fetched chats and save.\n try:\n with open(filename) as f:\n chats = json.load(f)\n since = chats[0][\"date\"]\n except IOError:\n chats, since = [], None # No saved chats.\n try:\n new_chats = list(self.chats(item, since))\n except hypchat.requests.HttpNotFound:\n # TODO: threw this in when I thought it was a deleted user\n # causing the error, but It's not that and I didn't investigate.\n logging.debug(\"Not found: {}\".format(pathname))\n continue\n if len(new_chats):\n chats = new_chats + chats\n HipDump.mkdir(pathname)\n HipDump.write_json(filename, chats)\n if not len(chats):\n logging.debug(\"No history for \" + item.name)\n continue\n logging.debug(u\"{}: {} ({})\".format(filename, basename, len(chats)))\n # Fetch any unfetched files in the entire history.\n if files_too:\n self.files(chats, pathname)\n HipDump.avatar_download(item, pathname, basename)",
"def setup_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def build_history_files(id, data_id, source, memo, tags):\n api = rest.PreprocessingApi(configuration.get_api_client())\n for entry in os.listdir(source):\n if os.path.isdir(os.path.join(source, entry)):\n uploaded_files = []\n for root, _, files in os.walk(os.path.join(source, entry)):\n for file in files:\n upload_info = object_storage.upload_file(api.api_client, os.path.join(root, file), 'Data')\n uploaded_files.append(rest.ComponentsAddFileInputModel(file_name=upload_info.file_name,\n stored_path=upload_info.stored_path))\n model = rest.PreprocessingApiModelsAddOutputDataInputModel(files=uploaded_files, name=entry,\n memo=memo, tags=list(tags))\n api.add_preprocessing_history_files(id, data_id, body=model)\n\n api.complete_preprocessing_history(id, data_id)",
"def upload_summaries(self):\n logger.info(\"Upload summaries.\")\n db_connect.wipe_database_upload(model_version_id=self.model_version_id,\n conn_def=self.conn_def)\n data = self.data_summaries[['model_version_id', 'year_id', 'location_id', 'sex_id',\n 'age_group_id', 'mean_cf', 'lower_cf', 'upper_cf',\n 'inserted_by',\n 'last_updated_by', 'last_updated_action']].reset_index(drop=True)\n db_connect.write_data(df=data, db='cod', table='model', conn_def=self.conn_def)",
"def update_submitted_workflows_file(self):\n if not self.submitted.empty():\n with self.submitted_workflows.open(\"rb\") as f:\n workflows = pickle.load(f)\n\n added_wfs = []\n while not self.submitted.empty():\n wf = self.submitted.get()\n added_wfs.append(wf)\n\n ensemble, trigger, workflow = wf\n\n workflows[ensemble][trigger].append(workflow)\n\n with self.submitted_workflows.open(\"wb\") as f:\n pickle.dump(workflows, f)\n\n self.log.debug(\"{} added to submitted workflows file\".format(added_wfs))",
"def commit(self):\n self.jobs.extend(self.newjobs)\n self.newjobs = []",
"def add_list_to_basket(self, file_list):\n for file_name in file_list:\n self.click_add_to_basket_icon(file_name)",
"def save_stocks(self, stocks):\n pass",
"def save_history(self):\n return 0",
"def saveLastUpload(self, timestamp, fileset):\n timeStampFile = os.path.join(self.weewx_root, \"#%s.last\" % self.name)\n with open(timeStampFile, \"wb\") as f:\n cPickle.dump(timestamp, f)\n cPickle.dump(fileset, f)",
"def add_scoops_to_bag(request):\n\n quantity = int(request.POST.get('quantity'))\n scoop_one = request.POST.get('scoop_one')\n scoop_one_id = json.loads(scoop_one)['id']\n scoop_two = request.POST.get('scoop_two')\n scoop_two_id = json.loads(scoop_two)['id']\n scoop_three = request.POST.get('scoop_three')\n scoop_three_id = json.loads(scoop_three)['id']\n scoop_four = request.POST.get('scoop_four')\n scoop_four_id = json.loads(scoop_four)['id']\n scoop_five = request.POST.get('scoop_five')\n scoop_five_id = json.loads(scoop_five)['id']\n scoop_six = request.POST.get('scoop_six')\n scoop_six_id = json.loads(scoop_six)['id']\n scoop_seven = request.POST.get('scoop_seven')\n scoop_seven_id = json.loads(scoop_seven)['id']\n scoop_eight = request.POST.get('scoop_eight')\n scoop_eight_id = json.loads(scoop_eight)['id']\n scoop_nine = request.POST.get('scoop_nine')\n scoop_nine_id = json.loads(scoop_nine)['id']\n scoop_ten = request.POST.get('scoop_ten')\n scoop_ten_id = json.loads(scoop_ten)['id']\n\n ten_scoops = [\n scoop_one_id,\n scoop_two_id,\n scoop_three_id,\n scoop_four_id,\n scoop_five_id,\n scoop_six_id,\n scoop_seven_id,\n scoop_eight_id,\n scoop_nine_id,\n scoop_ten_id\n ]\n\n custom_mix = Bags(\n name='CUSTOM MIX',\n duplicate_scoops=False,\n description='Custom selection of pick n mix',\n weight='1KG',\n price=12.00\n )\n custom_mix.save()\n\n dupe_scoops = list(duplicates(ten_scoops))\n\n if dupe_scoops:\n custom_mix.duplicate_scoops = True\n custom_mix.duplicate_scoop_ids = json.dumps(ten_scoops)\n custom_mix.save()\n\n for scoop in ten_scoops:\n custom_mix.scoops.add(scoop)\n\n bag_item_id = custom_mix.pk\n bag_name = custom_mix.name\n redirect_url = request.POST.get('redirect_url')\n cart = request.session.get('cart', {})\n\n if bag_item_id in list(cart.keys()):\n cart[bag_item_id] += quantity\n messages.success(\n request, f'{bag_name} quantity updated to {cart[bag_item_id]}!'\n )\n else:\n cart[bag_item_id] = quantity\n messages.success(request, f'{bag_name} have been added to your cart!')\n\n request.session['cart'] = cart\n return redirect(redirect_url)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve the most recent rebalance data for a basket. Example: >>> from gs_quant.markets.baskets import Basket >>> basket = Basket.get("GSMBXXXX") >>> basket.get_latest_rebalance_data()
|
def get_latest_rebalance_data(self) -> Dict:
return GsIndexApi.last_rebalance_data(self.id)
|
[
"def get_latest_rebalance_date(self) -> dt.date:\n last_rebalance = GsIndexApi.last_rebalance_data(self.id)\n return dt.datetime.strptime(last_rebalance['date'], '%Y-%m-%d').date()",
"def get_latest_bar(self, exchange, symbol):\n try:\n bars_list = self.latest_symbol_data[exchange][symbol]\n except KeyError:\n logger.error(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-1]",
"def get_latest_bar(self, symbol):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise # Reraise the current exception in an exception handler to be handled further up the call stack.\n else:\n return bars_list[-1]",
"def get_last_db_backup(db_backups_dir='backups', project_name='biz'):\n download_last_db_backup(db_backups_dir=db_backups_dir, project_name=project_name)",
"def get_last_refresh():\n try:\n conn = create_connection()\n cursor = conn.cursor()\n\n query = \"SELECT symbol,last_refreshed FROM Company_meta_data;\"\n cursor.execute(query)\n result = cursor.fetchall()\n return result\n\n except Exception as e:\n logger.error(e)\n return False\n finally:\n # closing database connection.\n if conn.is_connected():\n cursor.close()\n conn.close()",
"def get_latest_release():\n return json.load(urllib.request.urlopen(_LATEST_URL))",
"def get_last_snapshot(self):\n name = self.snapshot_names[-1]\n return self.get_snapshot(name)",
"def _get_latest_data(self):\n if self._latest_version is not None:\n return self._data[self._latest_version]\n else:\n if self.has_type(ArrayType.NUMPY):\n return self._data[ArrayType.NUMPY]\n else:\n return self._data[ArrayType.MXNET]",
"def get_market_last(symbols=None, output_format='json', **kwargs):\r\n return Last(symbols, output_format, **kwargs).fetch()",
"def get_last_trade(self, ticker):\n end_point = f\"https://api.polygon.io/v1/last/stocks/{ticker}?apiKey={self.API_KEY}\"\n content = requests.get(end_point)\n data = content.json()\n return data[\"last\"]",
"def get_last_update(name: str) -> float:\n global _feeds\n return _feeds[name]['last_update']",
"def get_latest_price_from_cache(self):\n if not self.cache:\n raise Exception(\"Empty Cache\")\n time_series = self.cache[\"Time Series (1min)\"]\n keys = sorted(time_series.keys())\n latest_date = keys[-1]\n latest_data = time_series[latest_date]\n latest_data['timestamp'] = latest_date\n return latest_data",
"def get_bank_latest_rates(bank_id):\n r = aliased(Rate)\n max_dates = Rate.query.with_entities(func.max(r.update_time).label('maxdate')). \\\n filter(and_(Rate.bank_id == r.bank_id, Rate.bank_id == bank_id))\n return Rate.query.filter_by(update_time=max_dates).first()",
"def get_latest_blob():\n storage_client = storage.Client()\n bucket_name = config.UNPROCESSED_BUCKET_NAME\n bucket = storage_client.lookup_bucket(bucket_name)\n\n if bucket is None:\n logger.critical(\"Bucket does not exist. Exiting program.\")\n return None\n\n blobs = list(storage_client.list_blobs(bucket_name))\n logger.debug(f\"blobs {blobs}\")\n latest_blob = max(blobs, key=lambda x: x.updated, default=None)\n\n return latest_blob",
"def get_latest_bar_datetime(self, exchange, symbol):\n try:\n bars_list = self.latest_symbol_data[exchange][symbol]\n except KeyError:\n logger.error(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-1][0]",
"def read_last_price(table,db='rofex.db',conn=None):\n table = rename_table(table)\n if conn == None:\n conn = make_connection(db)\n query = 'SELECT LA_price FROM \"{}\" ORDER BY date DESC LIMIT 1'.format(table)\n c = conn.cursor()\n value = c.execute(query).fetchone()\n return value[0]",
"def get_latest_bars(self, exchange, symbol, N=1):\n try:\n bars_list = self.latest_symbol_data[exchange][symbol]\n except KeyError:\n logger.error(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-N:]",
"def get_my_latest_blood_pressure(self):\n return self.get_user_latest_blood_pressure(self.request.user)",
"def get_latest_post():\n red = redis.Redis(host = 'localhost', db = config.subfeed_db)\n reddit = praw.Reddit(user_agent=config.my_user_agent,\n client_id=config.my_client_id,\n client_secret=config.my_client_secret) \n subfeed_key = \"sorted_lfc\"\n unique_new_list = []\n subreddit = reddit.subreddit(config.subfeed_subreddit)\n for submission in subreddit.hot(limit=config.subfeed_limit):\n current_time = int(time.time())\n present_in_db = red.zadd(subfeed_key, submission.id, current_time)\n if present_in_db == 1:\n submission_link = \"https://www.reddit.com\" + submission.permalink\n unique_new_list.append(submission_link)\n return unique_new_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve the most recent rebalance date for a basket
|
def get_latest_rebalance_date(self) -> dt.date:
last_rebalance = GsIndexApi.last_rebalance_data(self.id)
return dt.datetime.strptime(last_rebalance['date'], '%Y-%m-%d').date()
|
[
"def get_latest_rebalance_data(self) -> Dict:\n return GsIndexApi.last_rebalance_data(self.id)",
"def get_bank_latest_rates(bank_id):\n r = aliased(Rate)\n max_dates = Rate.query.with_entities(func.max(r.update_time).label('maxdate')). \\\n filter(and_(Rate.bank_id == r.bank_id, Rate.bank_id == bank_id))\n return Rate.query.filter_by(update_time=max_dates).first()",
"def get_last_trade_date():\n df = sf.get_stock('IBM')\n return df.index.max()",
"def last(self) -> datetime.date:\n return self.__dates__[-1]",
"def max_comeback_date(self):\n return self._max_comeback_date",
"def get_latest_bar_datetime(self, exchange, symbol):\n try:\n bars_list = self.latest_symbol_data[exchange][symbol]\n except KeyError:\n logger.error(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-1][0]",
"def date_latest(self):\n dt = self.sort_date_latest()\n return self._adjust_for_precision(dt, 1.0)",
"def get_latest_date_for_stock(stock_name):\n\n engine = get_db_connection()\n query = (\"SELECT max(date) FROM\" + \" \" + surround_mysql_quotes(stock_name))\n result = engine.run(query)\n # Get latest date for stock in database\n try:\n latest_date = (result[0][0].strftime(\"%Y-%m-%d\"))\n return latest_date\n except Exception as e:\n print(e)\n return None",
"def check_most_recent_date(schema, table):\n\n query_job = bq_client.query(\"SELECT * FROM \" + schema + \".\" + table + \" ORDER BY date DESC LIMIT 1\")\n\n results = query_job.result()\n results = list(results)\n date = results[0]['date']\n\n return date",
"def getDateOfLastReleaseData(self):\n sql = \"SELECT MAX(date) as date \"\n sql +=\"FROM terrabrasilis.deter_table \"\n sql +=\"WHERE date <= (SELECT date FROM public.deter_publish_date)\"\n\n return self.__execSQL(sql)",
"def get_recent_item(date):\n logger.debug(\"Requested the recent item added on %s\", date)\n return spark_query_engine.get_recent_item_api(date)",
"def lastSalaryGradeChange(self, date: date = timezone.now().date()):\n sgcs = SalaryGradeChange.objects.filter(rse=self).order_by('-date')\n for sgc in sgcs:\n if sgc.date <= date:\n return sgc\n # Unable to find any data\n raise ValueError('No Salary Data exists before specified date period for this RSE')",
"def get_last_update_time():\n return Rate.query.with_entities(func.max(Rate.update_time)).one()[0]",
"def date_debut(self):\n return self.__date_debut",
"def get_latest_close_price(ticker, price_date: datetime, max_looback: int):\n\n if max_looback not in range(1, 10):\n raise ValidationError(\n \"Invalid 'max_looback'. Allowed values are [1..10]\", None)\n\n looback_date = price_date - timedelta(days=max_looback)\n\n price_dict = get_daily_stock_close_prices(ticker, looback_date, price_date)\n\n price_date = sorted(list(price_dict.keys()), reverse=True)[0]\n\n return (price_date, price_dict[price_date])",
"def get_last_savingdate(self):\n history = self.get_history()\n latest_date = datetime(1990,1,1)\n for line in history:\n if datetime.strptime(line[0],\"%Y-%m-%d-%H-%M-%S\") > latest_date:\n latest_date = datetime.strptime(line[0],\"%Y-%m-%d-%H-%M-%S\")\n return latest_date",
"def last_date(self, position):\n return dt.datetime.strptime(self.chain.iloc[position]['LastDate'], '%Y-%m-%d')",
"def get_sell_date(self) -> datetime:\n return self.sell_date",
"def last_accessed(self, key: _K) -> datetime.datetime:\n try:\n item = self._data[key]\n except KeyError as err:\n raise ValueError(f'Key not found: {key}') from err\n return item.last_accessed"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|